Merge davem@outer-richmond.davemloft.net:src/GIT/net-2.6/
authorDavid S. Miller <davem@sunset.davemloft.net>
Sat, 10 Sep 2005 18:01:33 +0000 (11:01 -0700)
committerDavid S. Miller <davem@sunset.davemloft.net>
Sat, 10 Sep 2005 18:01:33 +0000 (11:01 -0700)
272 files changed:
COPYING
Documentation/00-INDEX
Documentation/CodingStyle
Documentation/DMA-API.txt
Documentation/DocBook/journal-api.tmpl
Documentation/DocBook/usb.tmpl
Documentation/MSI-HOWTO.txt
Documentation/RCU/RTFP.txt
Documentation/RCU/UP.txt
Documentation/RCU/checklist.txt
Documentation/RCU/rcu.txt
Documentation/RCU/whatisRCU.txt [new file with mode: 0644]
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/cpusets.txt
Documentation/crypto/descore-readme.txt
Documentation/feature-removal-schedule.txt
Documentation/ioctl/cdrom.txt
Documentation/mono.txt
Documentation/networking/bonding.txt
Documentation/networking/wan-router.txt
Documentation/pci.txt
Documentation/powerpc/eeh-pci-error-recovery.txt
Documentation/s390/s390dbf.txt
Documentation/scsi/ibmmca.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/sysrq.txt
Documentation/uml/UserModeLinux-HOWTO.txt
Documentation/usb/gadget_serial.txt
Documentation/video4linux/Zoran
REPORTING-BUGS
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/smp.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/common/scoop.c
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/mach-pxa/corgi.c
arch/i386/kernel/acpi/wakeup.S
arch/i386/kernel/cpu/common.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/smpboot.c
arch/ia64/kernel/mca.c
arch/m32r/Kconfig
arch/m32r/kernel/smp.c
arch/mips/kernel/irixsig.c
arch/mips/kernel/sysirix.c
arch/mips/lib/dec_and_lock.c
arch/parisc/lib/Makefile
arch/parisc/lib/bitops.c
arch/parisc/lib/debuglocks.c [deleted file]
arch/ppc/Kconfig
arch/ppc/Makefile
arch/ppc/kernel/cpu_setup_6xx.S
arch/ppc/kernel/cpu_setup_power4.S
arch/ppc/kernel/dma-mapping.c
arch/ppc/kernel/head.S
arch/ppc/kernel/idle.c
arch/ppc/kernel/smp.c
arch/ppc/kernel/traps.c
arch/ppc/lib/Makefile
arch/ppc/lib/dec_and_lock.c
arch/ppc/mm/fault.c
arch/ppc/platforms/pmac_sleep.S
arch/ppc/platforms/pmac_smp.c
arch/ppc/syslib/cpc700_pic.c
arch/ppc/syslib/i8259.c
arch/ppc/syslib/open_pic2.c
arch/ppc/syslib/ppc403_pic.c
arch/ppc/syslib/xilinx_pic.c
arch/ppc64/Makefile
arch/ppc64/kernel/cpu_setup_power4.S
arch/ppc64/lib/dec_and_lock.c
arch/ppc64/lib/locks.c
arch/s390/lib/spinlock.c
arch/sh/boards/adx/irq_maskreg.c
arch/sh/boards/bigsur/io.c
arch/sh/boards/bigsur/irq.c
arch/sh/boards/cqreek/irq.c
arch/sh/boards/harp/irq.c
arch/sh/boards/overdrive/irq.c
arch/sh/boards/renesas/hs7751rvoip/irq.c
arch/sh/boards/renesas/rts7751r2d/irq.c
arch/sh/boards/renesas/systemh/irq.c
arch/sh/boards/superh/microdev/irq.c
arch/sh/cchips/hd6446x/hd64465/io.c
arch/sh/cchips/voyagergx/irq.c
arch/sh/kernel/cpu/irq_imask.c
arch/sh/kernel/cpu/irq_ipr.c
arch/sh/kernel/cpu/sh4/irq_intc2.c
arch/sh64/kernel/irq_intc.c
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/lib/Makefile
arch/sparc/lib/debuglocks.c [deleted file]
arch/sparc64/kernel/process.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/lib/Makefile
arch/sparc64/lib/debuglocks.c [deleted file]
arch/v850/kernel/irq.c
arch/v850/kernel/setup.c
arch/v850/kernel/sim.c
arch/x86_64/kernel/setup.c
drivers/acpi/sleep/main.c
drivers/acpi/sleep/poweroff.c
drivers/acpi/sleep/proc.c
drivers/base/dmapool.c
drivers/block/cciss.c
drivers/block/cfq-iosched.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pt.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/umem.c
drivers/block/xd.c
drivers/block/z2ram.c
drivers/cdrom/sbpcd.c
drivers/cdrom/sonycd535.c
drivers/char/agp/backend.c
drivers/char/applicom.c
drivers/char/ftape/lowlevel/fdc-io.c
drivers/char/hpet.c
drivers/char/hw_random.c
drivers/char/ip2/i2lib.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/lcd.c
drivers/char/lp.c
drivers/char/mxser.c
drivers/char/n_tty.c
drivers/char/pcmcia/synclink_cs.c
drivers/ide/ide-io.c
drivers/ide/ide-tape.c
drivers/ide/ide-timing.h
drivers/ide/legacy/ide-cs.c
drivers/isdn/i4l/isdn_bsdcomp.c
drivers/isdn/i4l/isdn_common.c
drivers/md/dm-exception-store.c
drivers/md/md.c
drivers/media/common/saa7146_core.c
drivers/media/video/cpia_usb.c
drivers/media/video/stradis.c
drivers/media/video/video-buf.c
drivers/media/video/zoran_driver.c
drivers/media/video/zr36120.c
drivers/misc/hdpuftrs/hdpu_cpustate.c
drivers/mtd/devices/mtdram.c
drivers/mtd/ftl.c
drivers/net/bsd_comp.c
drivers/net/ppp_generic.c
drivers/net/tulip/de4x5.c
drivers/parisc/lasi.c
drivers/parport/ieee1284.c
drivers/parport/ieee1284_ops.c
drivers/parport/parport_pc.c
drivers/pci/pci-sysfs.c
drivers/pci/probe.c
drivers/sbus/char/bbc_envctrl.c
drivers/sbus/char/envctrl.c
drivers/scsi/53c7xx.c
drivers/scsi/ch.c
drivers/scsi/cpqfcTSinit.c
drivers/scsi/ibmmca.c
drivers/scsi/osst.c
drivers/serial/8250.c
drivers/telephony/ixj.c
drivers/usb/media/stv680.c
drivers/video/vgastate.c
fs/buffer.c
fs/cifs/connect.c
fs/cramfs/uncompress.c
fs/dcache.c
fs/jbd/transaction.c
fs/jffs/intrep.c
fs/lockd/clntproc.c
fs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/ntfs/aops.c
fs/pipe.c
fs/reiserfs/journal.c
fs/reiserfs/super.c
fs/smbfs/proc.c
fs/xfs/linux-2.6/time.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
include/asm-alpha/spinlock.h
include/asm-alpha/spinlock_types.h [new file with mode: 0644]
include/asm-arm/spinlock.h
include/asm-arm/spinlock_types.h [new file with mode: 0644]
include/asm-arm/unistd.h
include/asm-arm26/hardirq.h
include/asm-i386/div64.h
include/asm-i386/processor.h
include/asm-i386/spinlock.h
include/asm-i386/spinlock_types.h [new file with mode: 0644]
include/asm-ia64/spinlock.h
include/asm-ia64/spinlock_types.h [new file with mode: 0644]
include/asm-m32r/spinlock.h
include/asm-m32r/spinlock_types.h [new file with mode: 0644]
include/asm-mips/spinlock.h
include/asm-mips/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/atomic.h
include/asm-parisc/bitops.h
include/asm-parisc/cacheflush.h
include/asm-parisc/processor.h
include/asm-parisc/spinlock.h
include/asm-parisc/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/system.h
include/asm-ppc/smp.h
include/asm-ppc/spinlock.h
include/asm-ppc/spinlock_types.h [new file with mode: 0644]
include/asm-ppc/system.h
include/asm-ppc64/spinlock.h
include/asm-ppc64/spinlock_types.h [new file with mode: 0644]
include/asm-s390/spinlock.h
include/asm-s390/spinlock_types.h [new file with mode: 0644]
include/asm-sh/spinlock.h
include/asm-sh/spinlock_types.h [new file with mode: 0644]
include/asm-sparc/spinlock.h
include/asm-sparc/spinlock_types.h [new file with mode: 0644]
include/asm-sparc64/spinlock.h
include/asm-sparc64/spinlock_types.h [new file with mode: 0644]
include/asm-x86_64/proto.h
include/asm-x86_64/spinlock.h
include/asm-x86_64/spinlock_types.h [new file with mode: 0644]
include/linux/bio.h
include/linux/bit_spinlock.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/chio.h
include/linux/dmapool.h
include/linux/fs.h
include/linux/jbd.h
include/linux/jiffies.h
include/linux/radix-tree.h
include/linux/reiserfs_fs.h
include/linux/sched.h
include/linux/slab.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h [new file with mode: 0644]
include/linux/spinlock_api_up.h [new file with mode: 0644]
include/linux/spinlock_types.h [new file with mode: 0644]
include/linux/spinlock_types_up.h [new file with mode: 0644]
include/linux/spinlock_up.h [new file with mode: 0644]
include/linux/time.h
include/linux/writeback.h
ipc/mqueue.c
kernel/Makefile
kernel/acct.c
kernel/compat.c
kernel/cpuset.c
kernel/sched.c
kernel/signal.c
kernel/spinlock.c
kernel/timer.c
lib/Makefile
lib/dec_and_lock.c
lib/kernel_lock.c
lib/radix-tree.c
lib/sort.c
lib/spinlock_debug.c [new file with mode: 0644]
mm/filemap.c
mm/memory.c
mm/oom_kill.c
mm/page_alloc.c
mm/slab.c
mm/swap_state.c
mm/swapfile.c
sound/isa/sb/sb16_csp.c
sound/oss/skeleton.c [deleted file]

diff --git a/COPYING b/COPYING
index 2a7e338..ca442d3 100644 (file)
--- a/COPYING
+++ b/COPYING
@@ -18,7 +18,7 @@
                       Version 2, June 1991
 
  Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
@@ -321,7 +321,7 @@ the "copyright" line and a pointer to where the full notice is found.
 
     You should have received a copy of the GNU General Public License
     along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 
 Also add information on how to contact you by electronic and paper mail.
index f6de52b..433cf5e 100644 (file)
@@ -277,7 +277,7 @@ tty.txt
 unicode.txt
        - info on the Unicode character/font mapping used in Linux.
 uml/
-       - directory with infomation about User Mode Linux.
+       - directory with information about User Mode Linux.
 usb/
        - directory with info regarding the Universal Serial Bus.
 video4linux/
index f25b395..22e5f90 100644 (file)
@@ -236,6 +236,9 @@ ugly), but try to avoid excess.  Instead, put the comments at the head
 of the function, telling people what it does, and possibly WHY it does
 it.
 
+When commenting the kernel API functions, please use the kerneldoc format.
+See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
+for details.
 
                Chapter 8: You've made a mess of it
 
index 6ee3cd6..1af0f2d 100644 (file)
@@ -121,7 +121,7 @@ pool's device.
                        dma_addr_t addr);
 
 This puts memory back into the pool.  The pool is what was passed to
-the the pool allocation routine; the cpu and dma addresses are what
+the pool allocation routine; the cpu and dma addresses are what
 were returned when that routine allocated the memory being freed.
 
 
index 1ef6f43..341aaa4 100644 (file)
@@ -116,7 +116,7 @@ filesystem. Almost.
 
 You still need to actually journal your filesystem changes, this
 is done by wrapping them into transactions. Additionally you
-also need to wrap the modification of each of the the buffers
+also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
 you are actually making are. To do this use  journal_start() which
 returns a transaction handle.
@@ -128,7 +128,7 @@ and its counterpart journal_stop(), which indicates the end of a transaction
 are nestable calls, so you can reenter a transaction if necessary,
 but remember you must call journal_stop() the same number of times as
 journal_start() before the transaction is completed (or more accurately
-leaves the the update phase). Ext3/VFS makes use of this feature to simplify 
+leaves the update phase). Ext3/VFS makes use of this feature to simplify
 quota support.
 </para>
 
index f3ef0bf..705c442 100644 (file)
@@ -841,7 +841,7 @@ usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
                    File modification time is not updated by this request.
                    </para><para>
                    Those struct members are from some interface descriptor
-                   applying to the the current configuration.
+                   applying to the current configuration.
                    The interface number is the bInterfaceNumber value, and
                    the altsetting number is the bAlternateSetting value.
                    (This resets each endpoint in the interface.)
index d5032eb..63edc5f 100644 (file)
@@ -430,7 +430,7 @@ which may result in system hang. The software driver of specific
 MSI-capable hardware is responsible for whether calling
 pci_enable_msi or not. A return of zero indicates the kernel
 successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running on MSI/MSI-X mode.
 
 5.6 How to tell whether MSI/MSI-X is enabled on device function
 
index 9c6d450..fcbcbc3 100644 (file)
@@ -2,7 +2,8 @@ Read the F-ing Papers!
 
 
 This document describes RCU-related publications, and is followed by
-the corresponding bibtex entries.
+the corresponding bibtex entries.  A number of the publications may
+be found at http://www.rdrop.com/users/paulmck/RCU/.
 
 The first thing resembling RCU was published in 1980, when Kung and Lehman
 [Kung80] recommended use of a garbage collector to defer destruction
@@ -113,6 +114,10 @@ describing how to make RCU safe for soft-realtime applications [Sarma04c],
 and a paper describing SELinux performance with RCU [JamesMorris04b].
 
 
+2005 has seen further adaptation of RCU to realtime use, permitting
+preemption of RCU realtime critical sections [PaulMcKenney05a,
+PaulMcKenney05b].
+
 Bibtex Entries
 
 @article{Kung80
@@ -410,3 +415,32 @@ Oregon Health and Sciences University"
 \url{http://www.livejournal.com/users/james_morris/2153.html}
 [Viewed December 10, 2004]"
 }
+
+@unpublished{PaulMcKenney05a
+,Author="Paul E. McKenney"
+,Title="{[RFC]} {RCU} and {CONFIG\_PREEMPT\_RT} progress"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/5/9/185}
+[Viewed May 13, 2005]"
+,annotation="
+       First publication of working lock-based deferred free patches
+       for the CONFIG_PREEMPT_RT environment.
+"
+}
+
+@conference{PaulMcKenney05b
+,Author="Paul E. McKenney and Dipankar Sarma"
+,Title="Towards Hard Realtime Response from the Linux Kernel on SMP Hardware"
+,Booktitle="linux.conf.au 2005"
+,month="April"
+,year="2005"
+,address="Canberra, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf}
+[Viewed May 13, 2005]"
+,annotation="
+       Realtime turns into making RCU yet more realtime friendly.
+"
+}
index 3bfb84b..aab4a9e 100644 (file)
@@ -8,7 +8,7 @@ is that since there is only one CPU, it should not be necessary to
 wait for anything else to get done, since there are no other CPUs for
 anything else to be happening on.  Although this approach will -sort- -of-
 work a surprising amount of the time, it is a very bad idea in general.
-This document presents two examples that demonstrate exactly how bad an
+This document presents three examples that demonstrate exactly how bad an
 idea this is.
 
 
@@ -26,6 +26,9 @@ from softirq, the list scan would find itself referencing a newly freed
 element B.  This situation can greatly decrease the life expectancy of
 your kernel.
 
+This same problem can occur if call_rcu() is invoked from a hardware
+interrupt handler.
+
 
 Example 2: Function-Call Fatality
 
@@ -44,8 +47,37 @@ its arguments would cause it to fail to make the fundamental guarantee
 underlying RCU, namely that call_rcu() defers invoking its arguments until
 all RCU read-side critical sections currently executing have completed.
 
-Quick Quiz: why is it -not- legal to invoke synchronize_rcu() in
-this case?
+Quick Quiz #1: why is it -not- legal to invoke synchronize_rcu() in
+       this case?
+
+
+Example 3: Death by Deadlock
+
+Suppose that call_rcu() is invoked while holding a lock, and that the
+callback function must acquire this same lock.  In this case, if
+call_rcu() were to directly invoke the callback, the result would
+be self-deadlock.
+
+In some cases, it would be possible to restructure the code so that
+the call_rcu() is delayed until after the lock is released.  However,
+there are cases where this can be quite ugly:
+
+1.     If a number of items need to be passed to call_rcu() within
+       the same critical section, then the code would need to create
+       a list of them, then traverse the list once the lock was
+       released.
+
+2.     In some cases, the lock will be held across some kernel API,
+       so that delaying the call_rcu() until the lock is released
+       requires that the data item be passed up via a common API.
+       It is far better to guarantee that callbacks are invoked
+       with no locks held than to have to modify such APIs to allow
+       arbitrary data items to be passed back up through them.
+
+If call_rcu() directly invokes the callback, painful locking restrictions
+or API changes would be required.
+
+Quick Quiz #2: What locking restriction must RCU callbacks respect?
 
 
 Summary
@@ -53,12 +85,35 @@ Summary
 Permitting call_rcu() to immediately invoke its arguments or permitting
 synchronize_rcu() to immediately return breaks RCU, even on a UP system.
 So do not do it!  Even on a UP system, the RCU infrastructure -must-
-respect grace periods.
-
-
-Answer to Quick Quiz
-
-The calling function is scanning an RCU-protected linked list, and
-is therefore within an RCU read-side critical section.  Therefore,
-the called function has been invoked within an RCU read-side critical
-section, and is not permitted to block.
+respect grace periods, and -must- invoke callbacks from a known environment
+in which no locks are held.
+
+
+Answer to Quick Quiz #1:
+       Why is it -not- legal to invoke synchronize_rcu() in this case?
+
+       Because the calling function is scanning an RCU-protected linked
+       list, and is therefore within an RCU read-side critical section.
+       Therefore, the called function has been invoked within an RCU
+       read-side critical section, and is not permitted to block.
+
+Answer to Quick Quiz #2:
+       What locking restriction must RCU callbacks respect?
+
+       Any lock that is acquired within an RCU callback must be
+       acquired elsewhere using an _irq variant of the spinlock
+       primitive.  For example, if "mylock" is acquired by an
+       RCU callback, then a process-context acquisition of this
+       lock must use something like spin_lock_irqsave() to
+       acquire the lock.
+
+       If the process-context code were to simply use spin_lock(),
+       then, since RCU callbacks can be invoked from softirq context,
+       the callback might be called from a softirq that interrupted
+       the process-context critical section.  This would result in
+       self-deadlock.
+
+       This restriction might seem gratuitous, since very few RCU
+       callbacks acquire locks directly.  However, a great many RCU
+       callbacks do acquire locks -indirectly-, for example, via
+       the kfree() primitive.
index 8f3fb77..e118a7c 100644 (file)
@@ -43,6 +43,10 @@ over a rather long period of time, but improvements are always welcome!
        rcu_read_lock_bh()) in the read-side critical sections,
        and are also an excellent aid to readability.
 
+       As a rough rule of thumb, any dereference of an RCU-protected
+       pointer must be covered by rcu_read_lock() or rcu_read_lock_bh()
+       or by the appropriate update-side lock.
+
 3.     Does the update code tolerate concurrent accesses?
 
        The whole point of RCU is to permit readers to run without
@@ -90,7 +94,11 @@ over a rather long period of time, but improvements are always welcome!
 
                The rcu_dereference() primitive is used by the various
                "_rcu()" list-traversal primitives, such as the
-               list_for_each_entry_rcu().
+               list_for_each_entry_rcu().  Note that it is perfectly
+               legal (if redundant) for update-side code to use
+               rcu_dereference() and the "_rcu()" list-traversal
+               primitives.  This is particularly useful in code
+               that is common to readers and updaters.
 
        b.      If the list macros are being used, the list_add_tail_rcu()
                and list_add_rcu() primitives must be used in order
@@ -150,16 +158,9 @@ over a rather long period of time, but improvements are always welcome!
 
        Use of the _rcu() list-traversal primitives outside of an
        RCU read-side critical section causes no harm other than
-       a slight performance degradation on Alpha CPUs and some
-       confusion on the part of people trying to read the code.
-
-       Another way of thinking of this is "If you are holding the
-       lock that prevents the data structure from changing, why do
-       you also need RCU-based protection?"  That said, there may
-       well be situations where use of the _rcu() list-traversal
-       primitives while the update-side lock is held results in
-       simpler and more maintainable code.  The jury is still out
-       on this question.
+       a slight performance degradation on Alpha CPUs.  It can
+       also be quite helpful in reducing code bloat when common
+       code is shared between readers and updaters.
 
 10.    Conversely, if you are in an RCU read-side critical section,
        you -must- use the "_rcu()" variants of the list macros.
index eb44400..6fa0922 100644 (file)
@@ -64,6 +64,54 @@ o    I hear that RCU is patented?  What is with that?
        Of these, one was allowed to lapse by the assignee, and the
        others have been contributed to the Linux kernel under GPL.
 
+o      I hear that RCU needs work in order to support realtime kernels?
+
+       Yes, work in progress.
+
 o      Where can I find more information on RCU?
 
        See the RTFP.txt file in this directory.
+       Or point your browser at http://www.rdrop.com/users/paulmck/RCU/.
+
+o      What are all these files in this directory?
+
+
+       NMI-RCU.txt
+
+               Describes how to use RCU to implement dynamic
+               NMI handlers, which can be revectored on the fly,
+               without rebooting.
+
+       RTFP.txt
+
+               List of RCU-related publications and web sites.
+
+       UP.txt
+
+               Discussion of RCU usage in UP kernels.
+
+       arrayRCU.txt
+
+               Describes how to use RCU to protect arrays, with
+               resizeable arrays whose elements reference other
+               data structures being of the most interest.
+
+       checklist.txt
+
+               Lists things to check for when inspecting code that
+               uses RCU.
+
+       listRCU.txt
+
+               Describes how to use RCU to protect linked lists.
+               This is the simplest and most common use of RCU
+               in the Linux kernel.
+
+       rcu.txt
+
+               You are reading it!
+
+       whatisRCU.txt
+
+               Overview of how the RCU implementation works.  Along
+               the way, presents a conceptual view of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
new file mode 100644 (file)
index 0000000..354d89c
--- /dev/null
@@ -0,0 +1,902 @@
+What is RCU?
+
+RCU is a synchronization mechanism that was added to the Linux kernel
+during the 2.5 development effort that is optimized for read-mostly
+situations.  Although RCU is actually quite simple once you understand it,
+getting there can sometimes be a challenge.  Part of the problem is that
+most of the past descriptions of RCU have been written with the mistaken
+assumption that there is "one true way" to describe RCU.  Instead,
+the experience has been that different people must take different paths
+to arrive at an understanding of RCU.  This document provides several
+different paths, as follows:
+
+1.     RCU OVERVIEW
+2.     WHAT IS RCU'S CORE API?
+3.     WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+4.     WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+5.     WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+6.     ANALOGY WITH READER-WRITER LOCKING
+7.     FULL LIST OF RCU APIs
+8.     ANSWERS TO QUICK QUIZZES
+
+People who prefer starting with a conceptual overview should focus on
+Section 1, though most readers will profit by reading this section at
+some point.  People who prefer to start with an API that they can then
+experiment with should focus on Section 2.  People who prefer to start
+with example uses should focus on Sections 3 and 4.  People who need to
+understand the RCU implementation should focus on Section 5, then dive
+into the kernel source code.  People who reason best by analogy should
+focus on Section 6.  Section 7 serves as an index to the docbook API
+documentation, and Section 8 is the traditional answer key.
+
+So, start with the section that makes the most sense to you and your
+preferred method of learning.  If you need to know everything about
+everything, feel free to read the whole thing -- but if you are really
+that type of person, you have perused the source code and will therefore
+never need this document anyway.  ;-)
+
+
+1.  RCU OVERVIEW
+
+The basic idea behind RCU is to split updates into "removal" and
+"reclamation" phases.  The removal phase removes references to data items
+within a data structure (possibly by replacing them with references to
+new versions of these data items), and can run concurrently with readers.
+The reason that it is safe to run the removal phase concurrently with
+readers is the semantics of modern CPUs guarantee that readers will see
+either the old or the new version of the data structure rather than a
+partially updated reference.  The reclamation phase does the work of reclaiming
+(e.g., freeing) the data items removed from the data structure during the
+removal phase.  Because reclaiming data items can disrupt any readers
+concurrently referencing those data items, the reclamation phase must
+not start until readers no longer hold references to those data items.
+
+Splitting the update into removal and reclamation phases permits the
+updater to perform the removal phase immediately, and to defer the
+reclamation phase until all readers active during the removal phase have
+completed, either by blocking until they finish or by registering a
+callback that is invoked after they finish.  Only readers that are active
+during the removal phase need be considered, because any reader starting
+after the removal phase will be unable to gain a reference to the removed
+data items, and therefore cannot be disrupted by the reclamation phase.
+
+So the typical RCU update sequence goes something like the following:
+
+a.     Remove pointers to a data structure, so that subsequent
+       readers cannot gain a reference to it.
+
+b.     Wait for all previous readers to complete their RCU read-side
+       critical sections.
+
+c.     At this point, there cannot be any readers who hold references
+       to the data structure, so it now may safely be reclaimed
+       (e.g., kfree()d).
+
+Step (b) above is the key idea underlying RCU's deferred destruction.
+The ability to wait until all readers are done allows RCU readers to
+use much lighter-weight synchronization, in some cases, absolutely no
+synchronization at all.  In contrast, in more conventional lock-based
+schemes, readers must use heavy-weight synchronization in order to
+prevent an updater from deleting the data structure out from under them.
+This is because lock-based updaters typically update data items in place,
+and must therefore exclude readers.  In contrast, RCU-based updaters
+typically take advantage of the fact that writes to single aligned
+pointers are atomic on modern CPUs, allowing atomic insertion, removal,
+and replacement of data items in a linked structure without disrupting
+readers.  Concurrent RCU readers can then continue accessing the old
+versions, and can dispense with the atomic operations, memory barriers,
+and communications cache misses that are so expensive on present-day
+SMP computer systems, even in absence of lock contention.
+
+In the three-step procedure shown above, the updater is performing both
+the removal and the reclamation step, but it is often helpful for an
+entirely different thread to do the reclamation, as is in fact the case
+in the Linux kernel's directory-entry cache (dcache).  Even if the same
+thread performs both the update step (step (a) above) and the reclamation
+step (step (c) above), it is often helpful to think of them separately.
+For example, RCU readers and updaters need not communicate at all,
+but RCU provides implicit low-overhead communication between readers
+and reclaimers, namely, in step (b) above.
+
+So how the heck can a reclaimer tell when a reader is done, given
+that readers are not doing any sort of synchronization operations???
+Read on to learn about how RCU's API makes this easy.
+
+
+2.  WHAT IS RCU'S CORE API?
+
+The core RCU API is quite small:
+
+a.     rcu_read_lock()
+b.     rcu_read_unlock()
+c.     synchronize_rcu() / call_rcu()
+d.     rcu_assign_pointer()
+e.     rcu_dereference()
+
+There are many other members of the RCU API, but the rest can be
+expressed in terms of these five, though most implementations instead
+express synchronize_rcu() in terms of the call_rcu() callback API.
+
+The five core RCU APIs are described below, the other 18 will be enumerated
+later.  See the kernel docbook documentation for more info, or look directly
+at the function header comments.
+
+rcu_read_lock()
+
+       void rcu_read_lock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       entering an RCU read-side critical section.  It is illegal
+       to block while in an RCU read-side critical section, though
+       kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
+       critical sections.  Any RCU-protected data structure accessed
+       during an RCU read-side critical section is guaranteed to remain
+       unreclaimed for the full duration of that critical section.
+       Reference counts may be used in conjunction with RCU to maintain
+       longer-term references to data structures.
+
+rcu_read_unlock()
+
+       void rcu_read_unlock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       exiting an RCU read-side critical section.  Note that RCU
+       read-side critical sections may be nested and/or overlapping.
+
+synchronize_rcu()
+
+       void synchronize_rcu(void);
+
+       Marks the end of updater code and the beginning of reclaimer
+       code.  It does this by blocking until all pre-existing RCU
+       read-side critical sections on all CPUs have completed.
+       Note that synchronize_rcu() will -not- necessarily wait for
+       any subsequent RCU read-side critical sections to complete.
+       For example, consider the following sequence of events:
+
+                CPU 0                  CPU 1                 CPU 2
+            ----------------- ------------------------- ---------------
+        1.  rcu_read_lock()
+        2.                    enters synchronize_rcu()
+        3.                                               rcu_read_lock()
+        4.  rcu_read_unlock()
+        5.                     exits synchronize_rcu()
+        6.                                              rcu_read_unlock()
+
+       To reiterate, synchronize_rcu() waits only for ongoing RCU
+       read-side critical sections to complete, not necessarily for
+       any that begin after synchronize_rcu() is invoked.
+
+       Of course, synchronize_rcu() does not necessarily return
+       -immediately- after the last pre-existing RCU read-side critical
+       section completes.  For one thing, there might well be scheduling
+       delays.  For another thing, many RCU implementations process
+       requests in batches in order to improve efficiencies, which can
+       further delay synchronize_rcu().
+
+       Since synchronize_rcu() is the API that must figure out when
+       readers are done, its implementation is key to RCU.  For RCU
+       to be useful in all but the most read-intensive situations,
+       synchronize_rcu()'s overhead must also be quite small.
+
+       The call_rcu() API is a callback form of synchronize_rcu(),
+       and is described in more detail in a later section.  Instead of
+       blocking, it registers a function and argument which are invoked
+       after all ongoing RCU read-side critical sections have completed.
+       This callback variant is particularly useful in situations where
+       it is illegal to block.
+
+rcu_assign_pointer()
+
+       typeof(p) rcu_assign_pointer(p, typeof(p) v);
+
+       Yes, rcu_assign_pointer() -is- implemented as a macro, though it
+       would be cool to be able to declare a function in this manner.
+       (Compiler experts will no doubt disagree.)
+
+       The updater uses this function to assign a new value to an
+       RCU-protected pointer, in order to safely communicate the change
+       in value from the updater to the reader.  This function returns
+       the new value, and also executes any memory-barrier instructions
+       required for a given CPU architecture.
+
+       Perhaps more important, it serves to document which pointers
+       are protected by RCU.  That said, rcu_assign_pointer() is most
+       frequently used indirectly, via the _rcu list-manipulation
+       primitives such as list_add_rcu().
+
+rcu_dereference()
+
+       typeof(p) rcu_dereference(p);
+
+       Like rcu_assign_pointer(), rcu_dereference() must be implemented
+       as a macro.
+
+       The reader uses rcu_dereference() to fetch an RCU-protected
+       pointer, which returns a value that may then be safely
+       dereferenced.  Note that rcu_dereference() does not actually
+       dereference the pointer, instead, it protects the pointer for
+       later dereferencing.  It also executes any needed memory-barrier
+       instructions for a given CPU architecture.  Currently, only Alpha
+       needs memory barriers within rcu_dereference() -- on other CPUs,
+       it compiles to nothing, not even a compiler directive.
+
+       Common coding practice uses rcu_dereference() to copy an
+       RCU-protected pointer to a local variable, then dereferences
+       this local variable, for example as follows:
+
+               p = rcu_dereference(head.next);
+               return p->data;
+
+       However, in this case, one could just as easily combine these
+       into one statement:
+
+               return rcu_dereference(head.next)->data;
+
+       If you are going to be fetching multiple fields from the
+       RCU-protected structure, using the local variable is of
+       course preferred.  Repeated rcu_dereference() calls look
+       ugly and incur unnecessary overhead on Alpha CPUs.
+
+       Note that the value returned by rcu_dereference() is valid
+       only within the enclosing RCU read-side critical section.
+       For example, the following is -not- legal:
+
+               rcu_read_lock();
+               p = rcu_dereference(head.next);
+               rcu_read_unlock();
+               x = p->address;
+               rcu_read_lock();
+               y = p->data;
+               rcu_read_unlock();
+
+       Holding a reference from one RCU read-side critical section
+       to another is just as illegal as holding a reference from
+       one lock-based critical section to another!  Similarly,
+       using a reference outside of the critical section in which
+       it was acquired is just as illegal as doing so with normal
+       locking.
+
+       As with rcu_assign_pointer(), an important function of
+       rcu_dereference() is to document which pointers are protected
+       by RCU.  And, again like rcu_assign_pointer(), rcu_dereference()
+       is typically used indirectly, via the _rcu list-manipulation
+       primitives, such as list_for_each_entry_rcu().
+
+The following diagram shows how each API communicates among the
+reader, updater, and reclaimer.
+
+
+           rcu_assign_pointer()
+                                   +--------+
+           +---------------------->| reader |---------+
+           |                       +--------+         |
+           |                           |              |
+           |                           |              | Protect:
+           |                           |              | rcu_read_lock()
+           |                           |              | rcu_read_unlock()
+           |        rcu_dereference()  |              |
+       +---------+                      |              |
+       | updater |<---------------------+              |
+       +---------+                                     V
+           |                                    +-----------+
+           +----------------------------------->| reclaimer |
+                                                +-----------+
+             Defer:
+             synchronize_rcu() & call_rcu()
+
+
+The RCU infrastructure observes the time sequence of rcu_read_lock(),
+rcu_read_unlock(), synchronize_rcu(), and call_rcu() invocations in
+order to determine when (1) synchronize_rcu() invocations may return
+to their callers and (2) call_rcu() callbacks may be invoked.  Efficient
+implementations of the RCU infrastructure make heavy use of batching in
+order to amortize their overhead over many uses of the corresponding APIs.
+
+There are no fewer than three RCU mechanisms in the Linux kernel; the
+diagram above shows the first one, which is by far the most commonly used.
+The rcu_dereference() and rcu_assign_pointer() primitives are used for
+all three mechanisms, but different defer and protect primitives are
+used as follows:
+
+       Defer                   Protect
+
+a.     synchronize_rcu()       rcu_read_lock() / rcu_read_unlock()
+       call_rcu()
+
+b.     call_rcu_bh()           rcu_read_lock_bh() / rcu_read_unlock_bh()
+
+c.     synchronize_sched()     preempt_disable() / preempt_enable()
+                               local_irq_save() / local_irq_restore()
+                               hardirq enter / hardirq exit
+                               NMI enter / NMI exit
+
+These three mechanisms are used as follows:
+
+a.     RCU applied to normal data structures.
+
+b.     RCU applied to networking data structures that may be subjected
+       to remote denial-of-service attacks.
+
+c.     RCU applied to scheduler and interrupt/NMI-handler tasks.
+
+Again, most uses will be of (a).  The (b) and (c) cases are important
+for specialized uses, but are relatively uncommon.
+
+
+3.  WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+
+This section shows a simple use of the core RCU API to protect a
+global pointer to a dynamically allocated structure.  More typical
+uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+       };
+       DEFINE_SPINLOCK(foo_mutex);
+
+       struct foo *gbl_foo;
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses synchronize_rcu() to ensure that any readers that might
+        * have references to the old structure complete before freeing
+        * the old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               synchronize_rcu();
+               kfree(old_fp);
+       }
+
+       /*
+        * Return the value of field "a" of the current gbl_foo
+        * structure.  Use rcu_read_lock() and rcu_read_unlock()
+        * to ensure that the structure does not get deleted out
+        * from under us, and use rcu_dereference() to ensure that
+        * we see the initialized version of the structure (important
+        * for DEC Alpha and for people reading the code).
+        */
+       int foo_get_a(void)
+       {
+               int retval;
+
+               rcu_read_lock();
+               retval = rcu_dereference(gbl_foo)->a;
+               rcu_read_unlock();
+               return retval;
+       }
+
+So, to sum up:
+
+o      Use rcu_read_lock() and rcu_read_unlock() to guard RCU
+       read-side critical sections.
+
+o      Within an RCU read-side critical section, use rcu_dereference()
+       to dereference RCU-protected pointers.
+
+o      Use some solid scheme (such as locks or semaphores) to
+       keep concurrent updates from interfering with each other.
+
+o      Use rcu_assign_pointer() to update an RCU-protected pointer.
+       This primitive protects concurrent readers from the updater,
+       -not- concurrent updates from each other!  You therefore still
+       need to use locking (or something similar) to keep concurrent
+       rcu_assign_pointer() primitives from interfering with each other.
+
+o      Use synchronize_rcu() -after- removing a data element from an
+       RCU-protected data structure, but -before- reclaiming/freeing
+       the data element, in order to wait for the completion of all
+       RCU read-side critical sections that might be referencing that
+       data item.
+
+See checklist.txt for additional rules to follow when using RCU.
+
+
+4.  WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+
+In the example above, foo_update_a() blocks until a grace period elapses.
+This is quite simple, but in some cases one cannot afford to wait so
+long -- there might be other high-priority work to be done.
+
+In such cases, one uses call_rcu() rather than synchronize_rcu().
+The call_rcu() API is as follows:
+
+       void call_rcu(struct rcu_head * head,
+                     void (*func)(struct rcu_head *head));
+
+This function invokes func(head) after a grace period has elapsed.
+This invocation might happen from either softirq or process context,
+so the function is not permitted to block.  The foo struct needs to
+have an rcu_head structure added, perhaps as follows:
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+               struct rcu_head rcu;
+       };
+
+The foo_update_a() function might then be written as follows:
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses call_rcu() to ensure that any readers that might have
+        * references to the old structure complete before freeing the
+        * old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               call_rcu(&old_fp->rcu, foo_reclaim);
+       }
+
+The foo_reclaim() function might appear as follows:
+
+       void foo_reclaim(struct rcu_head *rp)
+       {
+               struct foo *fp = container_of(rp, struct foo, rcu);
+
+               kfree(fp);
+       }
+
+The container_of() primitive is a macro that, given a pointer into a
+struct, the type of the struct, and the pointed-to field within the
+struct, returns a pointer to the beginning of the struct.
+
+The use of call_rcu() permits the caller of foo_update_a() to
+immediately regain control, without needing to worry further about the
+old version of the newly updated element.  It also clearly shows the
+RCU distinction between updater, namely foo_update_a(), and reclaimer,
+namely foo_reclaim().
+
+The summary of advice is the same as for the previous section, except
+that we are now using call_rcu() rather than synchronize_rcu():
+
+o      Use call_rcu() -after- removing a data element from an
+       RCU-protected data structure in order to register a callback
+       function that will be invoked after the completion of all RCU
+       read-side critical sections that might be referencing that
+       data item.
+
+Again, see checklist.txt for additional rules governing the use of RCU.
+
+
+5.  WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+
+One of the nice things about RCU is that it has extremely simple "toy"
+implementations that are a good first step towards understanding the
+production-quality implementations in the Linux kernel.  This section
+presents two such "toy" implementations of RCU, one that is implemented
+in terms of familiar locking primitives, and another that more closely
+resembles "classic" RCU.  Both are way too simple for real-world use,
+lacking both functionality and performance.  However, they are useful
+in getting a feel for how RCU works.  See kernel/rcupdate.c for a
+production-quality implementation, and see:
+
+       http://www.rdrop.com/users/paulmck/RCU
+
+for papers describing the Linux kernel RCU implementation.  The OLS'01
+and OLS'02 papers are a good introduction, and the dissertation provides
+more details on the current implementation.
+
+
+5A.  "TOY" IMPLEMENTATION #1: LOCKING
+
+This section presents a "toy" RCU implementation that is based on
+familiar locking primitives.  Its overhead makes it a non-starter for
+real-life use, as does its lack of scalability.  It is also unsuitable
+for realtime use, since it allows scheduling latency to "bleed" from
+one read-side critical section to another.
+
+However, it is probably the easiest implementation to relate to, so is
+a good starting point.
+
+It is extremely simple:
+
+       static DEFINE_RWLOCK(rcu_gp_mutex);
+
+       void rcu_read_lock(void)
+       {
+               read_lock(&rcu_gp_mutex);
+       }
+
+       void rcu_read_unlock(void)
+       {
+               read_unlock(&rcu_gp_mutex);
+       }
+
+       void synchronize_rcu(void)
+       {
+               write_lock(&rcu_gp_mutex);
+               write_unlock(&rcu_gp_mutex);
+       }
+
+[You can ignore rcu_assign_pointer() and rcu_dereference() without
+missing much.  But here they are anyway.  And whatever you do, don't
+forget about them when submitting patches making use of RCU!]
+
+       #define rcu_assign_pointer(p, v)        ({ \
+                                                       smp_wmb(); \
+                                                       (p) = (v); \
+                                               })
+
+       #define rcu_dereference(p)     ({ \
+                                       typeof(p) _________p1 = p; \
+                                       smp_read_barrier_depends(); \
+                                       (_________p1); \
+                                       })
+
+
+The rcu_read_lock() and rcu_read_unlock() primitives read-acquire
+and release a global reader-writer lock.  The synchronize_rcu()
+primitive write-acquires this same lock, then immediately releases
+it.  This means that once synchronize_rcu() exits, all RCU read-side
+critical sections that were in progress before synchronize_rcu() was
+called are guaranteed to have completed -- there is no way that
+synchronize_rcu() would have been able to write-acquire the lock
+otherwise.
+
+It is possible to nest rcu_read_lock(), since reader-writer locks may
+be recursively acquired.  Note also that rcu_read_lock() is immune
+from deadlock (an important property of RCU).  The reason for this is
+that the only thing that can block rcu_read_lock() is a synchronize_rcu().
+But synchronize_rcu() does not acquire any locks while holding rcu_gp_mutex,
+so there can be no deadlock cycle.
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  How could this deadlock be avoided?
+
+
+5B.  "TOY" EXAMPLE #2: CLASSIC RCU
+
+This section presents a "toy" RCU implementation that is based on
+"classic RCU".  It is also short on performance (but only for updates) and
+on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+kernels.  The definitions of rcu_dereference() and rcu_assign_pointer()
+are the same as those shown in the preceding section, so they are omitted.
+
+       void rcu_read_lock(void) { }
+
+       void rcu_read_unlock(void) { }
+
+       void synchronize_rcu(void)
+       {
+               int cpu;
+
+               for_each_cpu(cpu)
+                       run_on(cpu);
+       }
+
+Note that rcu_read_lock() and rcu_read_unlock() do absolutely nothing.
+This is the great strength of classic RCU in a non-preemptive kernel:
+read-side overhead is precisely zero, at least on non-Alpha CPUs.
+And there is absolutely no way that rcu_read_lock() can possibly
+participate in a deadlock cycle!
+
+The implementation of synchronize_rcu() simply schedules itself on each
+CPU in turn.  The run_on() primitive can be implemented straightforwardly
+in terms of the sched_setaffinity() primitive.  Of course, a somewhat less
+"toy" implementation would restore the affinity upon completion rather
+than just leaving all tasks running on the last CPU, but when I said
+"toy", I meant -toy-!
+
+So how the heck is this supposed to work???
+
+Remember that it is illegal to block while in an RCU read-side critical
+section.  Therefore, if a given CPU executes a context switch, we know
+that it must have completed all preceding RCU read-side critical sections.
+Once -all- CPUs have executed a context switch, then -all- preceding
+RCU read-side critical sections will have completed.
+
+So, suppose that we remove a data item from its structure and then invoke
+synchronize_rcu().  Once synchronize_rcu() returns, we are guaranteed
+that there are no RCU read-side critical sections holding a reference
+to that data item, so we can safely reclaim it.
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+
+6.  ANALOGY WITH READER-WRITER LOCKING
+
+Although RCU can be used in many different ways, a very common use of
+RCU is analogous to reader-writer locking.  The following unified
+diff shows how closely related RCU and reader-writer locking can be.
+
+       @@ -13,15 +14,15 @@
+               struct list_head *lp;
+               struct el *p;
+
+       -       read_lock();
+       -       list_for_each_entry(p, head, lp) {
+       +       rcu_read_lock();
+       +       list_for_each_entry_rcu(p, head, lp) {
+                       if (p->key == key) {
+                               *result = p->data;
+       -                       read_unlock();
+       +                       rcu_read_unlock();
+                               return 1;
+                       }
+               }
+       -       read_unlock();
+       +       rcu_read_unlock();
+               return 0;
+        }
+
+       @@ -29,15 +30,16 @@
+        {
+               struct el *p;
+
+       -       write_lock(&listmutex);
+       +       spin_lock(&listmutex);
+               list_for_each_entry(p, head, lp) {
+                       if (p->key == key) {
+                               list_del(&p->list);
+       -                       write_unlock(&listmutex);
+       +                       spin_unlock(&listmutex);
+       +                       synchronize_rcu();
+                               kfree(p);
+                               return 1;
+                       }
+               }
+       -       write_unlock(&listmutex);
+       +       spin_unlock(&listmutex);
+               return 0;
+        }
+
+Or, for those who prefer a side-by-side listing:
+
+ 1 struct el {                          1 struct el {
+ 2   struct list_head list;             2   struct list_head list;
+ 3   long key;                          3   long key;
+ 4   spinlock_t mutex;                  4   spinlock_t mutex;
+ 5   int data;                          5   int data;
+ 6   /* Other data fields */            6   /* Other data fields */
+ 7 };                                   7 };
+ 8 spinlock_t listmutex;                8 spinlock_t listmutex;
+ 9 struct el head;                      9 struct el head;
+
+ 1 int search(long key, int *result)    1 int search(long key, int *result)
+ 2 {                                    2 {
+ 3   struct list_head *lp;              3   struct list_head *lp;
+ 4   struct el *p;                      4   struct el *p;
+ 5                                      5
+ 6   read_lock();                       6   rcu_read_lock();
+ 7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
+ 8     if (p->key == key) {             8     if (p->key == key) {
+ 9       *result = p->data;             9       *result = p->data;
+10       read_unlock();                10       rcu_read_unlock();
+11       return 1;                     11       return 1;
+12     }                               12     }
+13   }                                 13   }
+14   read_unlock();                    14   rcu_read_unlock();
+15   return 0;                         15   return 0;
+16 }                                   16 }
+
+ 1 int delete(long key)                 1 int delete(long key)
+ 2 {                                    2 {
+ 3   struct el *p;                      3   struct el *p;
+ 4                                      4
+ 5   write_lock(&listmutex);            5   spin_lock(&listmutex);
+ 6   list_for_each_entry(p, head, lp) { 6   list_for_each_entry(p, head, lp) {
+ 7     if (p->key == key) {             7     if (p->key == key) {
+ 8       list_del(&p->list);            8       list_del(&p->list);
+ 9       write_unlock(&listmutex);      9       spin_unlock(&listmutex);
+                                       10       synchronize_rcu();
+10       kfree(p);                     11       kfree(p);
+11       return 1;                     12       return 1;
+12     }                               13     }
+13   }                                 14   }
+14   write_unlock(&listmutex);         15   spin_unlock(&listmutex);
+15   return 0;                         16   return 0;
+16 }                                   17 }
+
+Either way, the differences are quite small.  Read-side locking moves
+to rcu_read_lock() and rcu_read_unlock(), update-side locking moves
+from a reader-writer lock to a simple spinlock, and a synchronize_rcu()
+precedes the kfree().
+
+However, there is one potential catch: the read-side and update-side
+critical sections can now run concurrently.  In many cases, this will
+not be a problem, but it is necessary to check carefully regardless.
+For example, if multiple independent list updates must be seen as
+a single atomic update, converting to RCU will require special care.
+
+Also, the presence of synchronize_rcu() means that the RCU version of
+delete() can now block.  If this is a problem, there is a callback-based
+mechanism that never blocks, namely call_rcu(), that can be used in
+place of synchronize_rcu().
+
+
+7.  FULL LIST OF RCU APIs
+
+The RCU APIs are documented in docbook-format header comments in the
+Linux-kernel source code, but it helps to have a full list of the
+APIs, since there does not appear to be a way to categorize them
+in docbook.  Here is the list, by category.
+
+Markers for RCU read-side critical sections:
+
+       rcu_read_lock
+       rcu_read_unlock
+       rcu_read_lock_bh
+       rcu_read_unlock_bh
+
+RCU pointer/list traversal:
+
+       rcu_dereference
+       list_for_each_rcu               (to be deprecated in favor of
+                                        list_for_each_entry_rcu)
+       list_for_each_safe_rcu          (deprecated, not used)
+       list_for_each_entry_rcu
+       list_for_each_continue_rcu      (to be deprecated in favor of new
+                                        list_for_each_entry_continue_rcu)
+       hlist_for_each_rcu              (to be deprecated in favor of
+                                        hlist_for_each_entry_rcu)
+       hlist_for_each_entry_rcu
+
+RCU pointer update:
+
+       rcu_assign_pointer
+       list_add_rcu
+       list_add_tail_rcu
+       list_del_rcu
+       list_replace_rcu
+       hlist_del_rcu
+       hlist_add_head_rcu
+
+RCU grace period:
+
+       synchronize_kernel (deprecated)
+       synchronize_net
+       synchronize_sched
+       synchronize_rcu
+       call_rcu
+       call_rcu_bh
+
+See the comment headers in the source code (or the docbook generated
+from them) for more information.
+
+
+8.  ANSWERS TO QUICK QUIZZES
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  [Referring to the lock-based "toy" RCU
+               algorithm.]
+
+Answer:                Consider the following sequence of events:
+
+               1.      CPU 0 acquires some unrelated lock, call it
+                       "problematic_lock".
+
+               2.      CPU 1 enters synchronize_rcu(), write-acquiring
+                       rcu_gp_mutex.
+
+               3.      CPU 0 enters rcu_read_lock(), but must wait
+                       because CPU 1 holds rcu_gp_mutex.
+
+               4.      CPU 1 is interrupted, and the irq handler
+                       attempts to acquire problematic_lock.
+
+               The system is now deadlocked.
+
+               One way to avoid this deadlock is to use an approach like
+               that of CONFIG_PREEMPT_RT, where all normal spinlocks
+               become blocking locks, and all irq handlers execute in
+               the context of special tasks.  In this case, in step 4
+               above, the irq handler would block, allowing CPU 1 to
+               release rcu_gp_mutex, avoiding the deadlock.
+
+               Even in the absence of deadlock, this RCU implementation
+               allows latency to "bleed" from readers to other
+               readers through synchronize_rcu().  To see this,
+               consider task A in an RCU read-side critical section
+               (thus read-holding rcu_gp_mutex), task B blocked
+               attempting to write-acquire rcu_gp_mutex, and
+               task C blocked in rcu_read_lock() attempting to
+               read_acquire rcu_gp_mutex.  Task A's RCU read-side
+               latency is holding up task C, albeit indirectly via
+               task B.
+
+               Realtime RCU implementations therefore use a counter-based
+               approach where tasks in RCU read-side critical sections
+               cannot be blocked by tasks executing synchronize_rcu().
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Answer:                Imagine a single-CPU system with a non-CONFIG_PREEMPT
+               kernel where a routing table is used by process-context
+               code, but can be updated by irq-context code (for example,
+               by an "ICMP REDIRECT" packet).  The usual way of handling
+               this would be to have the process-context code disable
+               interrupts while searching the routing table.  Use of
+               RCU allows such interrupt-disabling to be dispensed with.
+               Thus, without RCU, you pay the cost of disabling interrupts,
+               and with RCU you don't.
+
+               One can argue that the overhead of RCU in this
+               case is negative with respect to the single-CPU
+               interrupt-disabling approach.  Others might argue that
+               the overhead of RCU is merely zero, and that replacing
+               the positive overhead of the interrupt-disabling scheme
+               with the zero-overhead RCU scheme does not constitute
+               negative overhead.
+
+               In real life, of course, things are more complex.  But
+               even the theoretical possibility of negative overhead for
+               a synchronization primitive is a bit unexpected.  ;-)
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+Answer:                Just as PREEMPT_RT permits preemption of spinlock
+               critical sections, it permits preemption of RCU
+               read-side critical sections.  It also permits
+               spinlocks blocking while in RCU read-side critical
+               sections.
+
+               Why the apparent inconsistency?  Because it is
+               possible to use priority boosting to keep the RCU
+               grace periods short if need be (for example, if running
+               short of memory).  In contrast, if blocking waiting
+               for (say) network reception, there is no way to know
+               what should be boosted.  Especially given that the
+               process we need to boost might well be a human being
+               who just went out for a pizza or something.  And although
+               a computer-operated cattle prod might arouse serious
+               interest, it might also provoke serious objections.
+               Besides, how does the computer know what pizza parlor
+               the human being went to???
+
+
+ACKNOWLEDGEMENTS
+
+My thanks to the people who helped make this human-readable, including
+Jon Walpole, Josh Triplett, Serge Hallyn, and Suzanne Wood.
+
+
+For more information, see http://www.rdrop.com/users/paulmck/RCU.
index e2d1e76..6a82948 100644 (file)
@@ -36,7 +36,7 @@ cpufreq stats provides following statistics (explained in detail below).
 
 All the statistics will be from the time the stats driver has been inserted 
 to the time when a read of a particular statistic is done. Obviously, stats 
-driver will not have any information about the the frequcny transitions before
+driver will not have any information about the frequency transitions before
 the stats driver insertion.
 
 --------------------------------------------------------------------------------
index 47f4114..d17b7d2 100644 (file)
@@ -277,7 +277,7 @@ rewritten to the 'tasks' file of its cpuset.  This is done to avoid
 impacting the scheduler code in the kernel with a check for changes
 in a tasks processor placement.
 
-There is an exception to the above.  If hotplug funtionality is used
+There is an exception to the above.  If hotplug functionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
 then the kernel will automatically update the cpus_allowed of all
 tasks attached to CPUs in that cpuset to allow all CPUs.  When memory
index 166474c..16e9e63 100644 (file)
@@ -1,4 +1,4 @@
-Below is the orginal README file from the descore.shar package.
+Below is the original README file from the descore.shar package.
 ------------------------------------------------------------------------------
 
 des - fast & portable DES encryption & decryption.
index 5f95d4b..784e08c 100644 (file)
@@ -17,14 +17,6 @@ Who: Greg Kroah-Hartman <greg@kroah.com>
 
 ---------------------------
 
-What:  ACPI S4bios support
-When:  May 2005
-Why:   Noone uses it, and it probably does not work, anyway. swsusp is
-       faster, more reliable, and people are actually using it.
-Who:   Pavel Machek <pavel@suse.cz>
-
----------------------------
-
 What:  io_remap_page_range() (macro or function)
 When:  September 2005
 Why:   Replaced by io_remap_pfn_range() which allows more memory space
index 4ccdcc6..8ec32cc 100644 (file)
@@ -878,7 +878,7 @@ DVD_READ_STRUCT                     Read structure
 
        error returns:
          EINVAL        physical.layer_num exceeds number of layers
-         EIO           Recieved invalid response from drive
+         EIO           Received invalid response from drive
 
 
 
index 6739ab9..807a0c7 100644 (file)
@@ -30,7 +30,7 @@ other program after you have done the following:
    Read the file 'binfmt_misc.txt' in this directory to know
    more about the configuration process.
 
-3) Add the following enries to /etc/rc.local or similar script
+3) Add the following entries to /etc/rc.local or similar script
    to be run at system startup:
 
 # Insert BINFMT_MISC module into the kernel
index 24d0294..a55f0f9 100644 (file)
@@ -1241,7 +1241,7 @@ traffic while still maintaining carrier on.
 
        If running SNMP agents, the bonding driver should be loaded
 before any network drivers participating in a bond.  This requirement
-is due to the the interface index (ipAdEntIfIndex) being associated to
+is due to the interface index (ipAdEntIfIndex) being associated to
 the first interface found with a given IP address.  That is, there is
 only one ipAdEntIfIndex for each IP address.  For example, if eth0 and
 eth1 are slaves of bond0 and the driver for eth0 is loaded before the
@@ -1937,7 +1937,7 @@ switches currently available support 802.3ad.
        If not explicitly configured (with ifconfig or ip link), the
 MAC address of the bonding device is taken from its first slave
 device.  This MAC address is then passed to all following slaves and
-remains persistent (even if the the first slave is removed) until the
+remains persistent (even if the first slave is removed) until the
 bonding device is brought down or reconfigured.
 
        If you wish to change the MAC address, you can set it with
index aea20cd..c96897a 100644 (file)
@@ -355,7 +355,7 @@ REVISION HISTORY
                                There is no functional difference between the two packages         
 
 2.0.7   Aug 26, 1999           o  Merged X25API code into WANPIPE.
-                               o  Fixed a memeory leak for X25API
+                               o  Fixed a memory leak for X25API
                                o  Updated the X25API code for 2.2.X kernels.
                                o  Improved NEM handling.   
 
@@ -514,7 +514,7 @@ beta2-2.2.0 Jan 8 2001
                                o Patches for 2.4.0 kernel
                                o Patches for 2.2.18 kernel
                                o Minor updates to PPP and CHLDC drivers.
-                                 Note: No functinal difference. 
+                                 Note: No functional difference.
 
 beta3-2.2.9    Jan 10 2001
                                o I missed the 2.2.18 kernel patches in beta2-2.2.0
index 76d28d0..711210b 100644 (file)
@@ -84,7 +84,7 @@ Each entry consists of:
 
 Most drivers don't need to use the driver_data field.  Best practice
 for use of driver_data is to use it as an index into a static list of
-equivalant device types, not to use it as a pointer.
+equivalent device types, not to use it as a pointer.
 
 Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID}
 to have probe() called for every PCI device known to the system.
index 2bfe71b..e75d747 100644 (file)
@@ -134,7 +134,7 @@ pci_get_device_by_addr() will find the pci device associated
 with that address (if any).
 
 The default include/asm-ppc64/io.h macros readb(), inb(), insb(),
-etc. include a check to see if the the i/o read returned all-0xff's.
+etc. include a check to see if the i/o read returned all-0xff's.
 If so, these make a call to eeh_dn_check_failure(), which in turn
 asks the firmware if the all-ff's value is the sign of a true EEH
 error.  If it is not, processing continues as normal.  The grand
index e24fdea..e321a8e 100644 (file)
@@ -468,7 +468,7 @@ The hex_ascii view shows the data field in hex and ascii representation
 The raw view returns a bytestream as the debug areas are stored in memory.
 
 The sprintf view formats the debug entries in the same way as the sprintf
-function would do. The sprintf event/expection fuctions write to the 
+function would do. The sprintf event/expection functions write to the
 debug entry a pointer to the format string (size = sizeof(long)) 
 and for each vararg a long value. So e.g. for a debug entry with a format 
 string plus two varargs one would need to allocate a (3 * sizeof(long)) 
index 2814491..2ffb3ae 100644 (file)
    /proc/scsi/ibmmca/<host_no>. ibmmca_proc_info() provides this information.
    
    This table is quite informative for interested users. It shows the load
-   of commands on the subsystem and wether you are running the bypassed 
+   of commands on the subsystem and whether you are running the bypassed
    (software) or integrated (hardware) SCSI-command set (see below). The
    amount of accesses is shown. Read, write, modeselect is shown separately
    in order to help debugging problems with CD-ROMs or tapedrives.
index 5c49ba0..ebfcdf2 100644 (file)
@@ -1459,7 +1459,7 @@ devices where %i is sound card number from zero to seven.
 To auto-load an ALSA driver for OSS services, define the string
 'sound-slot-%i' where %i means the slot number for OSS, which
 corresponds to the card index of ALSA.  Usually, define this
-as the the same card module.
+as the same card module.
 
 An example configuration for a single emu10k1 card is like below:
 ----- /etc/modprobe.conf
index 136d817..baf17b3 100644 (file)
@@ -171,7 +171,7 @@ the header 'include/linux/sysrq.h', this will define everything else you need.
 Next, you must create a sysrq_key_op struct, and populate it with A) the key
 handler function you will use, B) a help_msg string, that will print when SysRQ
 prints help, and C) an action_msg string, that will print right before your
-handler is called. Your handler must conform to the protoype in 'sysrq.h'.
+handler is called. Your handler must conform to the prototype in 'sysrq.h'.
 
 After the sysrq_key_op is created, you can call the macro 
 register_sysrq_key(int key, struct sysrq_key_op *op_p) that is defined in
index 0c7b654..544430e 100644 (file)
   If you want to access files on the host machine from inside UML, you
   can treat it as a separate machine and either nfs mount directories
   from the host or copy files into the virtual machine with scp or rcp.
-  However, since UML is running on the the host, it can access those
+  However, since UML is running on the host, it can access those
   files just like any other process and make them available inside the
   virtual machine without needing to use the network.
 
index a938c3d..815f5c2 100644 (file)
@@ -20,7 +20,7 @@ License along with this program; if not, write to the Free
 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 MA 02111-1307 USA.
 
-This document and the the gadget serial driver itself are
+This document and the gadget serial driver itself are
 Copyright (C) 2004 by Al Borchers (alborchers@steinerpoint.com).
 
 If you have questions, problems, or suggestions for this driver
index 01425c2..52c94bd 100644 (file)
@@ -222,7 +222,7 @@ was introduced in 1991, is used in the DC10 old
 can generate: PAL , NTSC , SECAM
 
 The adv717x, should be able to produce PAL N. But you find nothing PAL N 
-specific in the the registers. Seem that you have to reuse a other standard
+specific in the registers. Seem that you have to reuse a other standard
 to generate PAL N, maybe it would work if you use the PAL M settings. 
 
 ==========================
index 224c347..f9da827 100644 (file)
@@ -9,7 +9,7 @@ screen please read "Documentation/oops-tracing.txt" before posting your
 bug report. This explains what you should do with the "Oops" information
 to make it useful to the recipient.
 
-      Send the output the maintainer of the kernel area that seems to
+      Send the output to the maintainer of the kernel area that seems to
 be involved with the problem. Don't worry too much about getting the
 wrong person. If you are unsure send it to the person responsible for the
 code relevant to what you were doing. If it occurs repeatably try and
@@ -18,15 +18,15 @@ The list of maintainers is in the MAINTAINERS file in this directory.
 
       If it is a security bug, please copy the Security Contact listed
 in the MAINTAINERS file.  They can help coordinate bugfix and disclosure.
-See Documentation/SecurityBugs for more infomation.
+See Documentation/SecurityBugs for more information.
 
       If you are totally stumped as to whom to send the report, send it to
 linux-kernel@vger.kernel.org. (For more information on the linux-kernel
 mailing list see http://www.tux.org/lkml/).
 
-This is a suggested format for a bug report sent to the Linux kernel mailing 
-list. Having a standardized bug report form makes it easier  for you not to 
-overlook things, and easier for the developers to find the pieces of 
+This is a suggested format for a bug report sent to the Linux kernel mailing
+list. Having a standardized bug report form makes it easier for you not to
+overlook things, and easier for the developers to find the pieces of
 information they're really interested in. Don't feel you have to follow it.
 
       First run the ver_linux script included as scripts/ver_linux, which
@@ -35,9 +35,9 @@ the command "sh scripts/ver_linux".
 
 Use that information to fill in all fields of the bug report form, and
 post it to the mailing list with a subject of "PROBLEM: <one line
-summary from [1.]>" for easy identification by the developers    
+summary from [1.]>" for easy identification by the developers.
 
-[1.] One line summary of the problem:    
+[1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
 [4.] Kernel version (from /proc/version):
index fc5ef90..24ae9a3 100644 (file)
@@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_raw_spin_unlock);
-EXPORT_SYMBOL(debug_spin_lock);
-EXPORT_SYMBOL(debug_spin_trylock);
-#endif
-#ifdef CONFIG_DEBUG_RWLOCK
-EXPORT_SYMBOL(_raw_write_lock);
-EXPORT_SYMBOL(_raw_read_lock);
-#endif
 EXPORT_SYMBOL(cpu_present_mask);
 #endif /* CONFIG_SMP */
 
index 2b03418..0636116 100644 (file)
@@ -1154,8 +1154,7 @@ osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remai
 
        ticks = timeval_to_jiffies(&tmp);
 
-       current->state = TASK_INTERRUPTIBLE;
-       ticks = schedule_timeout(ticks);
+       ticks = schedule_timeout_interruptible(ticks);
 
        if (remain) {
                jiffies_to_timeval(ticks, &tmp);
index e211aa7..da0be34 100644 (file)
@@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
        preempt_enable();
 }
-\f
-#ifdef CONFIG_DEBUG_SPINLOCK
-void
-_raw_spin_unlock(spinlock_t * lock)
-{
-       mb();
-       lock->lock = 0;
-
-       lock->on_cpu = -1;
-       lock->previous = NULL;
-       lock->task = NULL;
-       lock->base_file = "none";
-       lock->line_no = 0;
-}
-
-void
-debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       long tmp;
-       long stuck;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-       stuck = 1L << 30;
- try_again:
-
-       /* Use sub-sections to put the actual loop at the end
-          of this object file's text section so as to perfect
-          branch prediction.  */
-       __asm__ __volatile__(
-       "1:     ldl_l   %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "       blbs    %0,2f\n"
-       "       or      %0,1,%0\n"
-       "       stl_c   %0,%1\n"
-       "       beq     %0,3f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "2:     ldl     %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "3:     blt     %2,4b\n"
-       "       blbs    %0,2b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
-       : "m" (lock->lock), "2" (stuck) : "memory");
-
-       if (stuck < 0) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock stuck in %s at %p(%d)"
-                      " owner %s at %p(%d) %s:%d\n",
-                      base_file, line_no,
-                      current->comm, inline_pc, cpu,
-                      lock->task->comm, lock->previous,
-                      lock->on_cpu, lock->base_file, lock->line_no);
-               stuck = 1L << 36;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->on_cpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->base_file = base_file;
-       lock->line_no = line_no;
-
-       if (printed) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
-                      base_file, line_no, current->comm, inline_pc,
-                      cpu, jiffies - started);
-       }
-}
-
-int
-debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       if ((ret = !test_and_set_bit(0, lock))) {
-               lock->on_cpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->base_file = base_file;
-               lock->line_no = line_no;
-       }
-       return ret;
-}
-#endif /* CONFIG_DEBUG_SPINLOCK */
-\f
-#ifdef CONFIG_DEBUG_RWLOCK
-void _raw_write_lock(rwlock_t * lock)
-{
-       long regx, regy;
-       int stuck_lock, stuck_reader;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-       stuck_reader = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       blt     %1,8f\n"
-       "       mov     1,%1\n"
-       "       stl_c   %1,%0\n"
-       "       beq     %1,6f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     blt     %3,4b   # debug\n"
-       "       subl    %3,1,%3 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
-       "8:     blt     %4,4b   # debug\n"
-       "       subl    %4,1,%4 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blt     %1,8b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
-         "=&r" (stuck_lock), "=&r" (stuck_reader)
-       : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-       if (stuck_reader < 0) {
-               printk(KERN_WARNING "write_lock stuck on readers at %p\n",
-                      inline_pc);
-               goto try_again;
-       }
-}
-
-void _raw_read_lock(rwlock_t * lock)
-{
-       long regx;
-       int stuck_lock;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0;"
-       "       blbs    %1,6f;"
-       "       subl    %1,2,%1;"
-       "       stl_c   %1,%0;"
-       "       beq     %1,6f;"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     ldl     %1,%0;"
-       "       blt     %2,4b   # debug\n"
-       "       subl    %2,1,%2 # debug\n"
-       "       blbs    %1,6b;"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
-       : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-}
-#endif /* CONFIG_DEBUG_RWLOCK */
index 0f2899b..11fff04 100644 (file)
@@ -326,8 +326,8 @@ config SMP
          processor machines. On a single processor machine, the kernel will
          run faster if you say N here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt>, <file:Documentation/i386/IO-APIC.txt>,
+         See also the <file:Documentation/smp.txt>,
+         <file:Documentation/i386/IO-APIC.txt>,
          <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
index 45a5709..5d3acff 100644 (file)
@@ -53,7 +53,7 @@ config DEBUG_LL
        bool "Kernel low-level debugging functions"
        depends on DEBUG_KERNEL
        help
-         Say Y here to include definitions of printascii, printchar, printhex
+         Say Y here to include definitions of printascii, printch, printhex
          in the kernel.  This is helpful if you are debugging code that
          executes before the console is initialized.
 
index 688a595..d3a04c2 100644 (file)
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(read_scoop_reg);
 EXPORT_SYMBOL(write_scoop_reg);
 
 #ifdef CONFIG_PM
-static int scoop_suspend(struct device *dev, uint32_t state, uint32_t level)
+static int scoop_suspend(struct device *dev, pm_message_t state, uint32_t level)
 {
        if (level == SUSPEND_POWER_DOWN) {
                struct scoop_dev *sdev = dev_get_drvdata(dev);
index db07ce4..949ec44 100644 (file)
@@ -10,7 +10,7 @@
  *  This file is included twice in entry-common.S
  */
 #ifndef NR_syscalls
-#define NR_syscalls 320
+#define NR_syscalls 328
 #else
 
 __syscall_start:
@@ -333,6 +333,9 @@ __syscall_start:
                .long   sys_inotify_init
                .long   sys_inotify_add_watch
                .long   sys_inotify_rm_watch
+               .long   sys_mbind_wrapper
+/* 320 */      .long   sys_get_mempolicy
+               .long   sys_set_mempolicy
 __syscall_end:
 
                .rept   NR_syscalls - (__syscall_end - __syscall_start) / 4
index 6281d48..db302c6 100644 (file)
@@ -269,6 +269,10 @@ sys_arm_fadvise64_64_wrapper:
                str     r5, [sp, #4]            @ push r5 to stack
                b       sys_arm_fadvise64_64
 
+sys_mbind_wrapper:
+               str     r5, [sp, #4]
+               b       sys_mbind
+
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
index 29185ac..07b5dd4 100644 (file)
@@ -131,27 +131,12 @@ static struct platform_device corgits_device = {
 /*
  * MMC/SD Device
  *
- * The card detect interrupt isn't debounced so we delay it by HZ/4
+ * The card detect interrupt isn't debounced so we delay it by 250ms
  * to give the card a chance to fully insert/eject.
  */
-static struct mmc_detect {
-       struct timer_list detect_timer;
-       void *devid;
-} mmc_detect;
+static struct pxamci_platform_data corgi_mci_platform_data;
 
-static void mmc_detect_callback(unsigned long data)
-{
-       mmc_detect_change(mmc_detect.devid);
-}
-
-static irqreturn_t corgi_mmc_detect_int(int irq, void *devid, struct pt_regs *regs)
-{
-       mmc_detect.devid=devid;
-       mod_timer(&mmc_detect.detect_timer, jiffies + HZ/4);
-       return IRQ_HANDLED;
-}
-
-static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(int, void *, struct pt_regs *), void *data)
+static int corgi_mci_init(struct device *dev, irqreturn_t (*corgi_detect_int)(int, void *, struct pt_regs *), void *data)
 {
        int err;
 
@@ -161,11 +146,9 @@ static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(i
        pxa_gpio_mode(CORGI_GPIO_nSD_DETECT | GPIO_IN);
        pxa_gpio_mode(CORGI_GPIO_SD_PWR | GPIO_OUT);
 
-       init_timer(&mmc_detect.detect_timer);
-       mmc_detect.detect_timer.function = mmc_detect_callback;
-       mmc_detect.detect_timer.data = (unsigned long) &mmc_detect;
+       corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 
-       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_mmc_detect_int, SA_INTERRUPT,
+       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, SA_INTERRUPT,
                             "MMC card detect", data);
        if (err) {
                printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
@@ -198,7 +181,6 @@ static int corgi_mci_get_ro(struct device *dev)
 static void corgi_mci_exit(struct device *dev, void *data)
 {
        free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
-       del_timer(&mmc_detect.detect_timer);
 }
 
 static struct pxamci_platform_data corgi_mci_platform_data = {
index 44d886c..7c74fe0 100644 (file)
@@ -304,12 +304,6 @@ ret_point:
        call    restore_processor_state
        ret
 
-ENTRY(do_suspend_lowlevel_s4bios)
-       call save_processor_state
-       call save_registers
-       call acpi_enter_sleep_state_s4bios
-       ret
-
 ALIGN
 # saved registers
 saved_gdt:     .long   0,0
index 46ce9b2..9ad43be 100644 (file)
@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
        char *v = c->x86_vendor_id;
        int i;
index 1efdc76..35d3ce2 100644 (file)
@@ -573,8 +573,7 @@ static int balanced_irq(void *unused)
        }
 
        for ( ; ; ) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               time_remaining = schedule_timeout(time_remaining);
+               time_remaining = schedule_timeout_interruptible(time_remaining);
                try_to_freeze();
                if (time_after(jiffies,
                                prev_balance_time+balanced_irq_interval)) {
index 5e4893d..c70cd2a 100644 (file)
@@ -1330,8 +1330,7 @@ void __cpu_die(unsigned int cpu)
                        printk ("CPU %d is now offline\n", cpu);
                        return;
                }
-               current->state = TASK_UNINTERRUPTIBLE;
-               schedule_timeout(HZ/10);
+               msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
index 4ebbf39..8d48420 100644 (file)
@@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms,
        unw_init_from_interruption(&info, current, pt, sw);
        ia64_do_show_stack(&info, NULL);
 
-#ifdef CONFIG_SMP
-       /* read_trylock() would be handy... */
-       if (!tasklist_lock.write_lock)
-               read_lock(&tasklist_lock);
-#endif
-       {
+       if (read_trylock(&tasklist_lock)) {
                struct task_struct *g, *t;
                do_each_thread (g, t) {
                        if (t == current)
@@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms,
                        show_stack(t, NULL);
                } while_each_thread (g, t);
        }
-#ifdef CONFIG_SMP
-       if (!tasklist_lock.write_lock)
-               read_unlock(&tasklist_lock);
-#endif
 
        printk("\nINIT dump complete.  Please reboot now.\n");
        while (1);                      /* hang city if no debugger */
index 7622d4e..1ef3987 100644 (file)
@@ -242,8 +242,8 @@ config SMP
          Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
          Management" code will be disabled if you say Y here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt> and the SMP-HOWTO available at
+         See also the <file:Documentation/smp.txt>,
+         and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 48b187f..a4576ac 100644 (file)
@@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
 {
        spinlock_t *ipilock;
-       unsigned long flags = 0;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
@@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
+       spin_lock(ipilock);
        __asm__ __volatile__ (
-               ";; LOCK ipi_lock[i]            \n\t"
+               ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
-               "mvfc   %1, psw                 \n\t"
-               "clrpsw #0x40 -> nop            \n\t"
-               DCACHE_CLEAR("r4", "r5", "%2")
-               "lock   r4, @%2                 \n\t"
-               "addi   r4, #-1                 \n\t"
-               "unlock r4, @%2                 \n\t"
-               "mvtc   %1, psw                 \n\t"
-               "bnez   r4, 2f                  \n\t"
-               LOCK_SECTION_START(".balign 4 \n\t")
-               ".fillinsn                      \n"
-               "2:                             \n\t"
-               "ld     r4, @%2                 \n\t"
-               "blez   r4, 2b                  \n\t"
+               "ld     %0, @%1                 \n\t"
+               "and    %0, %4                  \n\t"
+               "beqz   %0, 2f                  \n\t"
+               "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
-               LOCK_SECTION_END
-               ";; CHECK IPICRi == 0           \n\t"
-               ".fillinsn                      \n"
-               "3:                             \n\t"
-               "ld     %0, @%3                 \n\t"
-               "and    %0, %6                  \n\t"
-               "beqz   %0, 4f                  \n\t"
-               "bnez   %5, 5f                  \n\t"
-               "bra    3b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
-               "4:                             \n\t"
-               "st     %4, @%3                 \n\t"
-               ";; UNLOCK ipi_lock[i]          \n\t"
+               "2:                             \n\t"
+               "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
-               "5:                             \n\t"
-               "ldi    r4, #1                  \n\t"
-               "st     r4, @%2                 \n\t"
+               "3:                             \n\t"
                : "=&r"(ipicr_val)
-               : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr),
-                 "r"(mask), "r"(try), "r"(my_physid_mask)
-               : "memory", "r4"
-#ifdef CONFIG_CHIP_M32700_TS1
-               , "r5"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+               : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
+               : "memory"
        );
+       spin_unlock(ipilock);
 
        return ipicr_val;
 }
index 4c114ae..eff8932 100644 (file)
@@ -440,18 +440,6 @@ struct irix5_siginfo {
        } stuff;
 };
 
-static inline unsigned long timespectojiffies(struct timespec *value)
-{
-       unsigned long sec = (unsigned) value->tv_sec;
-       long nsec = value->tv_nsec;
-
-       if (sec > (LONG_MAX / HZ))
-               return LONG_MAX;
-       nsec += 1000000000L / HZ - 1;
-       nsec /= 1000000000L / HZ;
-       return HZ * sec + nsec;
-}
-
 asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                                struct timespec *tp)
 {
@@ -489,14 +477,13 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                        error = -EINVAL;
                        goto out;
                }
-               expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec);
+               expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec);
        }
 
        while(1) {
                long tmp = 0;
 
-               current->state = TASK_INTERRUPTIBLE;
-               expire = schedule_timeout(expire);
+               expire = schedule_timeout_interruptible(expire);
 
                for (i=0; i<=4; i++)
                        tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
index b465954..4de1556 100644 (file)
@@ -1032,8 +1032,7 @@ bad:
 
 asmlinkage int irix_sginap(int ticks)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(ticks);
+       schedule_timeout_interruptible(ticks);
        return 0;
 }
 
index e44e957..fd82c84 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index 7bf7056..5f2e690 100644 (file)
@@ -5,5 +5,3 @@
 lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
 
 obj-y  := iomap.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
index 2de182f..90f400b 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-       [0 ... (ATOMIC_HASH_SIZE-1)]  = SPIN_LOCK_UNLOCKED
+raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c
deleted file mode 100644 (file)
index 1b33fe6..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* 
- *    Debugging versions of SMP locking primitives.
- *
- *    Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org>
- *
- *    Some code stollen from alpha & sparc64 ;)
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *    We use pdc_printf() throughout the file for all output messages, to avoid
- *    losing messages because of disabled interrupts. Since we're using these
- *    messages for debugging purposes, it makes sense not to send them to the
- *    linux console.
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>     /* in_interrupt() */
-#include <asm/system.h>
-#include <asm/hardirq.h>       /* in_interrupt() */
-#include <asm/pdc.h>
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       volatile unsigned int *a;
-       long stuck = INIT_STUCK;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-try_again:
-
-       /* Do the actual locking */
-       /* <T-Bone> ggg: we can't get stuck on the outter loop?
-        * <ggg> T-Bone: We can hit the outer loop
-        *      alot if multiple CPUs are constantly racing for a lock
-        *      and the backplane is NOT fair about which CPU sees
-        *      the update first. But it won't hang since every failed
-        *      attempt will drop us back into the inner loop and
-        *      decrement `stuck'.
-        * <ggg> K-class and some of the others are NOT fair in the HW
-        *      implementation so we could see false positives.
-        *      But fixing the lock contention is easier than
-        *      fixing the HW to be fair.
-        * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
-        *      spin until the value of the lock changes, or we time out.
-        */
-       mb();
-       a = __ldcw_align(lock);
-       while (stuck && (__ldcw(a) == 0))
-               while ((*a == 0) && --stuck);
-       mb();
-
-       if (unlikely(stuck <= 0)) {
-               pdc_printf(
-                       "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
-                       " owned by %s:%d in %s at %p(%d)\n",
-                       base_file, line_no, lock->module, lock,
-                       current->comm, inline_pc, cpu,
-                       lock->bfile, lock->bline, lock->task->comm,
-                       lock->previous, lock->oncpu);
-               stuck = INIT_STUCK;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->oncpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->bfile = (char *)base_file;
-       lock->bline = line_no;
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       base_file, line_no, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       CHECK_LOCK(lock);
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       if (unlikely((*a != 0) && lock->babble)) {
-               lock->babble--;
-               pdc_printf(
-                       "%s:%d: spin_unlock(%s:%p) not locked\n",
-                       base_file, line_no, lock->module, lock);
-       }
-       *a = 1; 
-       mb();
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       ret = (__ldcw(a) != 0);
-       mb();
-       if (ret) {
-               lock->oncpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->bfile = (char *)base_file;
-               lock->bline = line_no;
-       }
-       return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- * o writer (wants to modify data) attempts to acquire the rwlock
- * o He gets the write lock.
- * o Interupts are still enabled, we take an interrupt with the
- *   write still holding the lock.
- * o interrupt handler tries to acquire the rwlock for read.
- * o deadlock since the writer can't release it at this point.
- * 
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock.  But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       long stuck = INIT_STUCK;
-       int printed = 0;
-       int cpu = smp_processor_id();
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-retry:
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               
-               stuck--;
-               if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on writer"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               else if (unlikely(stuck <= 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on reader"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               
-               while(rw->counter != 0);
-
-               goto retry;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       bfile, bline, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       int cpu = smp_processor_id();
-#endif
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               return 0;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-#if 0
-       pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n",
-                  bfile, bline, current->comm, inline_pc, cpu);
-#endif
-       return 1;
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int cpu = smp_processor_id();
-#endif
-       unsigned long flags;
-
-       local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
-
-       rw->counter++;
-#if 0
-       pdc_printf(
-               "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
-               bfile, bline, current->comm, inline_pc,
-               cpu, jiffies - started);
-#endif
-       _raw_spin_unlock(&rw->lock);
-       local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
index e3f1ce3..347ea28 100644 (file)
@@ -265,6 +265,15 @@ config PPC601_SYNC_FIX
 
          If in doubt, say Y here.
 
+config HOTPLUG_CPU
+       bool "Support for enabling/disabling CPUs"
+       depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC
+       ---help---
+         Say Y here to be able to disable and re-enable individual
+         CPUs at runtime on SMP machines.
+
+         Say N if you are unsure.
+
 source arch/ppc/platforms/4xx/Kconfig
 source arch/ppc/platforms/85xx/Kconfig
 
index 4b3fe39..6dd7b50 100644 (file)
@@ -21,13 +21,14 @@ CC          := $(CC) -m32
 endif
 
 LDFLAGS_vmlinux        := -Ttext $(KERNELLOAD) -Bstatic
-CPPFLAGS       += -Iarch/$(ARCH) -Iinclude3
+# The -Iarch/$(ARCH)/include is temporary while we are merging
+CPPFLAGS       += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
 AFLAGS         += -Iarch/$(ARCH)
 CFLAGS         += -Iarch/$(ARCH) -msoft-float -pipe \
                -ffixed-r2 -mmultiple
 CPP            = $(CC) -E $(CFLAGS)
 # Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE    += -Iinclude3
+LINUXINCLUDE    += -Iarch/$(ARCH)/include
 
 CHECKFLAGS     += -D__powerpc__
 
@@ -103,15 +104,16 @@ endef
 
 archclean:
        $(Q)$(MAKE) $(clean)=arch/ppc/boot
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 prepare: checkbin
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
index 1f37b7e..ba39643 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 304589a..7e4fbb6 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/page.h>
 #include <asm/ppc_asm.h>
 #include <asm/cputable.h>
-#include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
index e0c631c..b566d98 100644 (file)
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(__dma_sync);
  * __dma_sync_page() implementation for systems using highmem.
  * In this case, each page of a buffer must be kmapped/kunmapped
  * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atmomic()/kunmap_atomic() are used.
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
  *
  * Note: yes, it is possible and correct to have a buffer extend
  * beyond the first page.
index 55daf12..1960fb8 100644 (file)
@@ -1023,23 +1023,21 @@ __secondary_start_gemini:
         andc    r4,r4,r3
         mtspr   SPRN_HID0,r4
         sync
-        bl      gemini_prom_init
         b       __secondary_start
 #endif /* CONFIG_GEMINI */
-       .globl  __secondary_start_psurge
-__secondary_start_psurge:
-       li      r24,1                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge2
-__secondary_start_psurge2:
-       li      r24,2                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge3
-__secondary_start_psurge3:
-       li      r24,3                   /* cpu # */
-       b       __secondary_start_psurge99
-__secondary_start_psurge99:
-       /* we come in here with IR=0 and DR=1, and DBAT 0
+
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
+       /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
           set to map the 0xf0000000 - 0xffffffff region */
        mfmsr   r0
        rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
index 53547b6..fba29c8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
+#include <linux/cpu.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -35,6 +36,7 @@
 void default_idle(void)
 {
        void (*powersave)(void);
+       int cpu = smp_processor_id();
 
        powersave = ppc_md.power_save;
 
@@ -44,7 +46,7 @@ void default_idle(void)
 #ifdef CONFIG_SMP
                else {
                        set_thread_flag(TIF_POLLING_NRFLAG);
-                       while (!need_resched())
+                       while (!need_resched() && !cpu_is_offline(cpu))
                                barrier();
                        clear_thread_flag(TIF_POLLING_NRFLAG);
                }
@@ -52,6 +54,8 @@ void default_idle(void)
        }
        if (need_resched())
                schedule();
+       if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+               cpu_die();
 }
 
 /*
index e70b587..726fe7c 100644 (file)
@@ -45,6 +45,7 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
+static struct task_struct *idle_tasks[NR_CPUS];
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
@@ -286,7 +287,8 @@ static void __devinit smp_store_cpu_info(int id)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int num_cpus, i;
+       int num_cpus, i, cpu;
+       struct task_struct *p;
 
        /* Fixup boot cpu */
         smp_store_cpu_info(smp_processor_id());
@@ -308,6 +310,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
        if (smp_ops->space_timers)
                smp_ops->space_timers(num_cpus);
+
+       for_each_cpu(cpu) {
+               if (cpu == smp_processor_id())
+                       continue;
+               /* create a process for the processor */
+               p = fork_idle(cpu);
+               if (IS_ERR(p))
+                       panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+               p->thread_info->cpu = cpu;
+               idle_tasks[cpu] = p;
+       }
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -334,12 +347,17 @@ int __devinit start_secondary(void *unused)
        set_dec(tb_ticks_per_jiffy);
        cpu_callin_map[cpu] = 1;
 
-       printk("CPU %i done callin...\n", cpu);
+       printk("CPU %d done callin...\n", cpu);
        smp_ops->setup_cpu(cpu);
-       printk("CPU %i done setup...\n", cpu);
-       local_irq_enable();
+       printk("CPU %d done setup...\n", cpu);
        smp_ops->take_timebase();
-       printk("CPU %i done timebase take...\n", cpu);
+       printk("CPU %d done timebase take...\n", cpu);
+
+       spin_lock(&call_lock);
+       cpu_set(cpu, cpu_online_map);
+       spin_unlock(&call_lock);
+
+       local_irq_enable();
 
        cpu_idle();
        return 0;
@@ -347,17 +365,11 @@ int __devinit start_secondary(void *unused)
 
 int __cpu_up(unsigned int cpu)
 {
-       struct task_struct *p;
        char buf[32];
        int c;
 
-       /* create a process for the processor */
-       /* only regs.msr is actually used, and 0 is OK for it */
-       p = fork_idle(cpu);
-       if (IS_ERR(p))
-               panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-       secondary_ti = p->thread_info;
-       p->thread_info->cpu = cpu;
+       secondary_ti = idle_tasks[cpu]->thread_info;
+       mb();
 
        /*
         * There was a cache flush loop here to flush the cache
@@ -389,7 +401,11 @@ int __cpu_up(unsigned int cpu)
        printk("Processor %d found.\n", cpu);
 
        smp_ops->give_timebase();
-       cpu_set(cpu, cpu_online_map);
+
+       /* Wait until cpu puts itself in the online map */
+       while (!cpu_online(cpu))
+               cpu_relax();
+
        return 0;
 }
 
index 8356d54..961ede8 100644 (file)
@@ -118,6 +118,28 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
+
+       /*
+        * Init gets no signals that it doesn't have a handler for.
+        * That's all very well, but if it has caused a synchronous
+        * exception and we ignore the resulting signal, it will just
+        * generate the same exception over and over again and we get
+        * nowhere.  Better to kill it and let the kernel panic.
+        */
+       if (current->pid == 1) {
+               __sighandler_t handler;
+
+               spin_lock_irq(&current->sighand->siglock);
+               handler = current->sighand->action[signr-1].sa.sa_handler;
+               spin_unlock_irq(&current->sighand->siglock);
+               if (handler == SIG_DFL) {
+                       /* init has generated a synchronous exception
+                          and it doesn't have a handler for the signal */
+                       printk(KERN_CRIT "init has generated signal %d "
+                              "but has no handler for it\n", signr);
+                       do_exit(signr);
+               }
+       }
 }
 
 /*
index 1c380e6..f1e1fb4 100644 (file)
@@ -4,6 +4,5 @@
 
 obj-y                  := checksum.o string.o strcase.o dec_and_lock.o div64.o
 
-obj-$(CONFIG_SMP)      += locks.o
 obj-$(CONFIG_8xx)      += rheap.o
 obj-$(CONFIG_CPM2)     += rheap.o
index 4ee8880..b18f0d9 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index 57d9930..ee5e9f2 100644 (file)
@@ -278,11 +278,7 @@ bad_area:
 
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = code;
-               info.si_addr = (void __user *) address;
-               force_sig_info(SIGSEGV, &info, current);
+               _exception(SIGSEGV, regs, code, address);
                return 0;
        }
 
index 8d67adc..88419c7 100644 (file)
@@ -161,6 +161,8 @@ _GLOBAL(low_sleep_handler)
        addi r3,r3,sleep_storage@l
        stw r5,0(r3)
 
+       .globl  low_cpu_die
+low_cpu_die:
        /* Flush & disable all caches */
        bl      flush_disable_caches
 
index 8e049da..794a239 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
+#include <linux/cpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
@@ -55,9 +56,7 @@
  * Powersurge (old powermac SMP) support.
  */
 
-extern void __secondary_start_psurge(void);
-extern void __secondary_start_psurge2(void);   /* Temporary horrible hack */
-extern void __secondary_start_psurge3(void);   /* Temporary horrible hack */
+extern void __secondary_start_pmac_0(void);
 
 /* Addresses for powersurge registers */
 #define HAMMERHEAD_BASE                0xf8000000
@@ -119,7 +118,7 @@ static volatile int sec_tb_reset = 0;
 static unsigned int pri_tb_hi, pri_tb_lo;
 static unsigned int pri_tb_stamp;
 
-static void __init core99_init_caches(int cpu)
+static void __devinit core99_init_caches(int cpu)
 {
        if (!cpu_has_feature(CPU_FTR_L2CR))
                return;
@@ -346,7 +345,7 @@ static int __init smp_psurge_probe(void)
 
 static void __init smp_psurge_kick_cpu(int nr)
 {
-       void (*start)(void) = __secondary_start_psurge;
+       unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
        unsigned long a;
 
        /* may need to flush here if secondary bats aren't setup */
@@ -356,17 +355,7 @@ static void __init smp_psurge_kick_cpu(int nr)
 
        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
 
-       /* setup entry point of secondary processor */
-       switch (nr) {
-       case 2:
-               start = __secondary_start_psurge2;
-               break;
-       case 3:
-               start = __secondary_start_psurge3;
-               break;
-       }
-
-       out_be32(psurge_start, __pa(start));
+       out_be32(psurge_start, start);
        mb();
 
        psurge_set_ipi(nr);
@@ -500,14 +489,14 @@ static int __init smp_core99_probe(void)
        return ncpus;
 }
 
-static void __init smp_core99_kick_cpu(int nr)
+static void __devinit smp_core99_kick_cpu(int nr)
 {
        unsigned long save_vector, new_vector;
        unsigned long flags;
 
        volatile unsigned long *vector
                 = ((volatile unsigned long *)(KERNELBASE+0x100));
-       if (nr < 1 || nr > 3)
+       if (nr < 0 || nr > 3)
                return;
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
 
@@ -518,19 +507,9 @@ static void __init smp_core99_kick_cpu(int nr)
        save_vector = *vector;
 
        /* Setup fake reset vector that does    
-        *   b __secondary_start_psurge - KERNELBASE
+        *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
         */
-       switch(nr) {
-               case 1:
-                       new_vector = (unsigned long)__secondary_start_psurge;
-                       break;
-               case 2:
-                       new_vector = (unsigned long)__secondary_start_psurge2;
-                       break;
-               case 3:
-                       new_vector = (unsigned long)__secondary_start_psurge3;
-                       break;
-       }
+       new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
        *vector = 0x48000002 + new_vector - KERNELBASE;
 
        /* flush data cache and inval instruction cache */
@@ -554,7 +533,7 @@ static void __init smp_core99_kick_cpu(int nr)
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
 }
 
-static void __init smp_core99_setup_cpu(int cpu_nr)
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
 {
        /* Setup L2/L3 */
        if (cpu_nr != 0)
@@ -668,3 +647,47 @@ struct smp_ops_t core99_smp_ops __pmacdata = {
        .give_timebase  = smp_core99_give_timebase,
        .take_timebase  = smp_core99_take_timebase,
 };
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       /* XXX reset cpu affinity here */
+       openpic_set_priority(0xf);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       mb();
+       udelay(20);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
+static int cpu_dead[NR_CPUS];
+
+void cpu_die(void)
+{
+       local_irq_disable();
+       cpu_dead[smp_processor_id()] = 1;
+       mb();
+       low_cpu_die();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       int timeout;
+
+       timeout = 1000;
+       while (!cpu_dead[cpu]) {
+               if (--timeout == 0) {
+                       printk("CPU %u refused to die!\n", cpu);
+                       break;
+               }
+               msleep(1);
+       }
+       cpu_callin_map[cpu] = 0;
+       cpu_dead[cpu] = 0;
+}
+
+#endif
index 7747098..75fe8eb 100644 (file)
@@ -90,14 +90,10 @@ cpc700_mask_and_ack_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cpc700_pic = {
-       "CPC700 PIC",
-       NULL,
-       NULL,
-       cpc700_unmask_irq,
-       cpc700_mask_irq,
-       cpc700_mask_and_ack_irq,
-       NULL,
-       NULL
+       .typename = "CPC700 PIC",
+       .enable = cpc700_unmask_irq,
+       .disable = cpc700_mask_irq,
+       .ack = cpc700_mask_and_ack_irq,
 };
 
 __init static void
index b9391e6..5c7908c 100644 (file)
@@ -129,14 +129,11 @@ static void i8259_end_irq(unsigned int irq)
 }
 
 struct hw_interrupt_type i8259_pic = {
-       " i8259    ",
-       NULL,
-       NULL,
-       i8259_unmask_irq,
-       i8259_mask_irq,
-       i8259_mask_and_ack_irq,
-       i8259_end_irq,
-       NULL
+       .typename = " i8259    ",
+       .enable = i8259_unmask_irq,
+       .disable = i8259_mask_irq,
+       .ack = i8259_mask_and_ack_irq,
+       .end = i8259_end_irq,
 };
 
 static struct resource pic1_iores = {
index 7e272c5..2e0ea92 100644 (file)
@@ -82,13 +82,11 @@ static void openpic2_end_irq(unsigned int irq_nr);
 static void openpic2_ack_irq(unsigned int irq_nr);
 
 struct hw_interrupt_type open_pic2 = {
-       " OpenPIC2 ",
-       NULL,
-       NULL,
-       openpic2_enable_irq,
-       openpic2_disable_irq,
-       openpic2_ack_irq,
-       openpic2_end_irq,
+       .typename = " OpenPIC2 ",
+       .enable = openpic2_enable_irq,
+       .disable = openpic2_disable_irq,
+       .ack = openpic2_ack_irq,
+       .end = openpic2_end_irq,
 };
 
 /*
index 06cb0af..ce4d1de 100644 (file)
@@ -34,13 +34,10 @@ static void ppc403_aic_disable(unsigned int irq);
 static void ppc403_aic_disable_and_ack(unsigned int irq);
 
 static struct hw_interrupt_type ppc403_aic = {
-       "403GC AIC",
-       NULL,
-       NULL,
-       ppc403_aic_enable,
-       ppc403_aic_disable,
-       ppc403_aic_disable_and_ack,
-       0
+       .typename = "403GC AIC",
+       .enable = ppc403_aic_enable,
+       .disable = ppc403_aic_disable,
+       .ack = ppc403_aic_disable_and_ack,
 };
 
 int
index e0bd66f..2cbcad2 100644 (file)
@@ -79,14 +79,11 @@ xilinx_intc_end(unsigned int irq)
 }
 
 static struct hw_interrupt_type xilinx_intc = {
-       "Xilinx Interrupt Controller",
-       NULL,
-       NULL,
-       xilinx_intc_enable,
-       xilinx_intc_disable,
-       xilinx_intc_disable_and_ack,
-       xilinx_intc_end,
-       0
+       .typename = "Xilinx Interrupt Controller",
+       .enable = xilinx_intc_enable,
+       .disable = xilinx_intc_disable,
+       .ack = xilinx_intc_disable_and_ack,
+       .end = xilinx_intc_end,
 };
 
 int
index 0a23aea..17d2c1e 100644 (file)
@@ -56,7 +56,7 @@ LDFLAGS_vmlinux       := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
 CFLAGS         += -msoft-float -pipe -mminimal-toc -mtraceback=none \
                   -mcall-aixdesc
 # Temporary hack until we have migrated to asm-powerpc
-CPPFLAGS       += -Iinclude3
+CPPFLAGS       += -Iarch/$(ARCH)/include
 
 GCC_VERSION     := $(call cc-version)
 GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
@@ -115,14 +115,15 @@ all: $(KBUILD_IMAGE)
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi;
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 define archhelp
   echo  '  zImage.vmode        - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)'
index bfce609..1fb673c 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 6e8d859..7b9d4da 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index ef70ef9..033643a 100644 (file)
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void __spin_yield(raw_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;
 
-       lock_value = lock->lock;
+       lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
@@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock)
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
-       if (lock->lock != lock_value)
+       if (lock->slock != lock_value)
                return;         /* something has changed */
 #ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(rwlock_t *rw)
+void __rw_yield(raw_rwlock_t *rw)
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
@@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw)
 }
 #endif
 
-void spin_unlock_wait(spinlock_t *lock)
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-       while (lock->lock) {
+       while (lock->slock) {
                HMT_low();
                if (SHARED_PROCESSOR)
                        __spin_yield(lock);
@@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock)
        HMT_medium();
 }
 
-EXPORT_SYMBOL(spin_unlock_wait);
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
index 888b559..2dc14e9 100644 (file)
@@ -36,7 +36,7 @@ _diag44(void)
 }
 
 void
-_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
+_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
 int
-_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
+_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
 void
-_raw_read_lock_wait(rwlock_t *rw)
+_raw_read_lock_wait(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int
-_raw_read_trylock_retry(rwlock_t *rw)
+_raw_read_trylock_retry(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void
-_raw_write_lock_wait(rwlock_t *rw)
+_raw_write_lock_wait(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
@@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int
-_raw_write_trylock_retry(rwlock_t *rw)
+_raw_write_trylock_retry(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
index ca91bb0..c0973f8 100644 (file)
@@ -37,13 +37,13 @@ static void end_maskreg_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type maskreg_irq_type = {
-       " Mask Register",
-       startup_maskreg_irq,
-       shutdown_maskreg_irq,
-       enable_maskreg_irq,
-       disable_maskreg_irq,
-       mask_and_ack_maskreg,
-       end_maskreg_irq
+       .typename = " Mask Register",
+       .startup = startup_maskreg_irq,
+       .shutdown = shutdown_maskreg_irq,
+       .enable = enable_maskreg_irq,
+       .disable = disable_maskreg_irq,
+       .ack = mask_and_ack_maskreg,
+       .end = end_maskreg_irq
 };
 
 /* actual implementatin */
index 697144d..a9fde78 100644 (file)
@@ -37,10 +37,6 @@ static u8 bigsur_iomap_lo_shift[BIGSUR_IOMAP_LO_NMAP];
 static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP];
 static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
 {
        u32 port, endport = baseport + nports;
@@ -57,7 +53,7 @@ void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
                addr += (1<<(BIGSUR_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                pr_debug("    maphi[0x%x] = 0x%08x\n", port, addr);
@@ -80,7 +76,7 @@ void bigsur_port_unmap(u32 baseport, u32 nports)
                bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0;
index c188fc3..6ddbcc7 100644 (file)
@@ -228,23 +228,23 @@ static void shutdown_bigsur_irq(unsigned int irq)
 
 /* Define the IRQ structures for the L1 and L2 IRQ types */
 static struct hw_interrupt_type bigsur_l1irq_type = {
-        "BigSur-CPLD-Level1-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l1irq,
-        disable_bigsur_l1irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename = "BigSur-CPLD-Level1-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown = shutdown_bigsur_irq,
+       .enable = enable_bigsur_l1irq,
+       .disable = disable_bigsur_l1irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 static struct hw_interrupt_type bigsur_l2irq_type = {
-        "BigSur-CPLD-Level2-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l2irq,
-        disable_bigsur_l2irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename = "BigSur-CPLD-Level2-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown = shutdown_bigsur_irq,
+       .enable = enable_bigsur_l2irq,
+       .disable = disable_bigsur_l2irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 
index fa6cfe5..d1da0d8 100644 (file)
@@ -83,13 +83,13 @@ static void shutdown_cqreek_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cqreek_irq_type = {
-       "CqREEK-IRQ",
-       startup_cqreek_irq,
-       shutdown_cqreek_irq,
-       enable_cqreek_irq,
-       disable_cqreek_irq,
-       mask_and_ack_cqreek,
-       end_cqreek_irq
+       .typename = "CqREEK-IRQ",
+       .startup = startup_cqreek_irq,
+       .shutdown = shutdown_cqreek_irq,
+       .enable = enable_cqreek_irq,
+       .disable = disable_cqreek_irq,
+       .ack = mask_and_ack_cqreek,
+       .end = end_cqreek_irq
 };
 
 int cqreek_has_ide, cqreek_has_isa;
index acd5848..52d0ba3 100644 (file)
@@ -39,13 +39,13 @@ static unsigned int startup_harp_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type harp_irq_type = {
-       "Harp-IRQ",
-       startup_harp_irq,
-       shutdown_harp_irq,
-       enable_harp_irq,
-       disable_harp_irq,
-       mask_and_ack_harp,
-       end_harp_irq
+       .typename = "Harp-IRQ",
+       .startup = startup_harp_irq,
+       .shutdown = shutdown_harp_irq,
+       .enable = enable_harp_irq,
+       .disable = disable_harp_irq,
+       .ack = mask_and_ack_harp,
+       .end = end_harp_irq
 };
 
 static void disable_harp_irq(unsigned int irq)
index 23adc6b..715e8fe 100644 (file)
@@ -86,13 +86,13 @@ static unsigned int startup_od_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type od_irq_type = {
-       "Overdrive-IRQ",
-       startup_od_irq,
-       shutdown_od_irq,
-       enable_od_irq,
-       disable_od_irq,
-       mask_and_ack_od,
-       end_od_irq
+       .typename = "Overdrive-IRQ",
+       .startup = startup_od_irq,
+       .shutdown = shutdown_od_irq,
+       .enable = enable_od_irq,
+       .disable = disable_od_irq,
+       .ack = mask_and_ack_od,
+       .end = end_od_irq
 };
 
 static void disable_od_irq(unsigned int irq)
index a7921f6..ed4c5b5 100644 (file)
@@ -74,13 +74,13 @@ static void end_hs7751rvoip_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type hs7751rvoip_irq_type = {
-       "HS7751RVoIP IRQ",
-       startup_hs7751rvoip_irq,
-       shutdown_hs7751rvoip_irq,
-       enable_hs7751rvoip_irq,
-       disable_hs7751rvoip_irq,
-       ack_hs7751rvoip_irq,
-       end_hs7751rvoip_irq,
+       .typename = "HS7751RVoIP IRQ",
+       .startup = startup_hs7751rvoip_irq,
+       .shutdown = shutdown_hs7751rvoip_irq,
+       .enable = enable_hs7751rvoip_irq,
+       .disable = disable_hs7751rvoip_irq,
+       .ack = ack_hs7751rvoip_irq,
+       .end = end_hs7751rvoip_irq,
 };
 
 static void make_hs7751rvoip_irq(unsigned int irq)
index 95717f4..d36c937 100644 (file)
@@ -88,13 +88,13 @@ static void end_rts7751r2d_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type rts7751r2d_irq_type = {
-       "RTS7751R2D IRQ",
-       startup_rts7751r2d_irq,
-       shutdown_rts7751r2d_irq,
-       enable_rts7751r2d_irq,
-       disable_rts7751r2d_irq,
-       ack_rts7751r2d_irq,
-       end_rts7751r2d_irq,
+       .typename = "RTS7751R2D IRQ",
+       .startup = startup_rts7751r2d_irq,
+       .shutdown = shutdown_rts7751r2d_irq,
+       .enable = enable_rts7751r2d_irq,
+       .disable = disable_rts7751r2d_irq,
+       .ack = ack_rts7751r2d_irq,
+       .end = end_rts7751r2d_irq,
 };
 
 static void make_rts7751r2d_irq(unsigned int irq)
index 5675a41..7a2eb10 100644 (file)
@@ -35,13 +35,13 @@ static void end_systemh_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type systemh_irq_type = {
-       " SystemH Register",
-       startup_systemh_irq,
-       shutdown_systemh_irq,
-       enable_systemh_irq,
-       disable_systemh_irq,
-       mask_and_ack_systemh,
-       end_systemh_irq
+       .typename = " SystemH Register",
+       .startup = startup_systemh_irq,
+       .shutdown = shutdown_systemh_irq,
+       .enable = enable_systemh_irq,
+       .disable = disable_systemh_irq,
+       .ack = mask_and_ack_systemh,
+       .end = end_systemh_irq
 };
 
 static unsigned int startup_systemh_irq(unsigned int irq)
index 1298883..1395c1e 100644 (file)
@@ -83,13 +83,13 @@ static unsigned int startup_microdev_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type microdev_irq_type = {
-       "MicroDev-IRQ",
-       startup_microdev_irq,
-       shutdown_microdev_irq,
-       enable_microdev_irq,
-       disable_microdev_irq,
-       mask_and_ack_microdev,
-       end_microdev_irq
+       .typename = "MicroDev-IRQ",
+       .startup = startup_microdev_irq,
+       .shutdown = shutdown_microdev_irq,
+       .enable = enable_microdev_irq,
+       .disable = disable_microdev_irq,
+       .ack = mask_and_ack_microdev,
+       .end = end_microdev_irq
 };
 
 static void disable_microdev_irq(unsigned int irq)
index 99ac709..84cb142 100644 (file)
@@ -48,10 +48,6 @@ static unsigned char hd64465_iomap_lo_shift[HD64465_IOMAP_LO_NMAP];
 static unsigned long   hd64465_iomap_hi[HD64465_IOMAP_HI_NMAP];
 static unsigned char   hd64465_iomap_hi_shift[HD64465_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 #define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x))
 
 void hd64465_port_map(unsigned short baseport, unsigned int nports,
@@ -71,7 +67,7 @@ void hd64465_port_map(unsigned short baseport, unsigned int nports,
            addr += (1<<(HD64465_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            DPRINTK("    maphi[0x%x] = 0x%08lx\n", port, addr);
@@ -95,7 +91,7 @@ void hd64465_port_unmap(unsigned short baseport, unsigned int nports)
            hd64465_iomap_lo[port>>HD64465_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            hd64465_iomap_hi[port>>HD64465_IOMAP_HI_SHIFT] = 0;
index 3079234..1b6ac52 100644 (file)
@@ -87,13 +87,13 @@ static void shutdown_voyagergx_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type voyagergx_irq_type = {
-       "VOYAGERGX-IRQ",
-       startup_voyagergx_irq,
-       shutdown_voyagergx_irq,
-       enable_voyagergx_irq,
-       disable_voyagergx_irq,
-       mask_and_ack_voyagergx,
-       end_voyagergx_irq,
+       .typename = "VOYAGERGX-IRQ",
+       .startup = startup_voyagergx_irq,
+       .shutdown = shutdown_voyagergx_irq,
+       .enable = enable_voyagergx_irq,
+       .disable = disable_voyagergx_irq,
+       .ack = mask_and_ack_voyagergx,
+       .end = end_voyagergx_irq,
 };
 
 static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
index f76901e..a963d00 100644 (file)
@@ -46,13 +46,13 @@ static unsigned int startup_imask_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type imask_irq_type = {
-       "SR.IMASK",
-       startup_imask_irq,
-       shutdown_imask_irq,
-       enable_imask_irq,
-       disable_imask_irq,
-       mask_and_ack_imask,
-       end_imask_irq
+       .typename = "SR.IMASK",
+       .startup = startup_imask_irq,
+       .shutdown = shutdown_imask_irq,
+       .enable = enable_imask_irq,
+       .disable = disable_imask_irq,
+       .ack = mask_and_ack_imask,
+       .end = end_imask_irq
 };
 
 void static inline set_interrupt_registers(int ip)
index 7ea3d2d..71f9209 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_ipr_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type ipr_irq_type = {
-       "IPR-IRQ",
-       startup_ipr_irq,
-       shutdown_ipr_irq,
-       enable_ipr_irq,
-       disable_ipr_irq,
-       mask_and_ack_ipr,
-       end_ipr_irq
+       .typename = "IPR-IRQ",
+       .startup = startup_ipr_irq,
+       .shutdown = shutdown_ipr_irq,
+       .enable = enable_ipr_irq,
+       .disable = disable_ipr_irq,
+       .ack = mask_and_ack_ipr,
+       .end = end_ipr_irq
 };
 
 static void disable_ipr_irq(unsigned int irq)
@@ -142,13 +142,13 @@ static unsigned int startup_pint_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type pint_irq_type = {
-       "PINT-IRQ",
-       startup_pint_irq,
-       shutdown_pint_irq,
-       enable_pint_irq,
-       disable_pint_irq,
-       mask_and_ack_pint,
-       end_pint_irq
+       .typename = "PINT-IRQ",
+       .startup = startup_pint_irq,
+       .shutdown = shutdown_pint_irq,
+       .enable = enable_pint_irq,
+       .disable = disable_pint_irq,
+       .ack = mask_and_ack_pint,
+       .end = end_pint_irq
 };
 
 static void disable_pint_irq(unsigned int irq)
index 099ebbf..f6b16ba 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_intc2_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type intc2_irq_type = {
-       "INTC2-IRQ",
-       startup_intc2_irq,
-       shutdown_intc2_irq,
-       enable_intc2_irq,
-       disable_intc2_irq,
-       mask_and_ack_intc2,
-       end_intc2_irq
+       .typename = "INTC2-IRQ",
+       .startup = startup_intc2_irq,
+       .shutdown = shutdown_intc2_irq,
+       .enable = enable_intc2_irq,
+       .disable = disable_intc2_irq,
+       .ack = mask_and_ack_intc2,
+       .end = end_intc2_irq
 };
 
 static void disable_intc2_irq(unsigned int irq)
index 43f88f3..fc99bf4 100644 (file)
@@ -107,13 +107,13 @@ static void mask_and_ack_intc(unsigned int);
 static void end_intc_irq(unsigned int irq);
 
 static struct hw_interrupt_type intc_irq_type = {
-       "INTC",
-       startup_intc_irq,
-       shutdown_intc_irq,
-       enable_intc_irq,
-       disable_intc_irq,
-       mask_and_ack_intc,
-       end_intc_irq
+       .typename = "INTC",
+       .startup = startup_intc_irq,
+       .shutdown = shutdown_intc_irq,
+       .enable = enable_intc_irq,
+       .disable = disable_intc_irq,
+       .ack = mask_and_ack_intc,
+       .end = end_intc_irq
 };
 
 static int irlm;               /* IRL mode */
index 5d974a2..f848093 100644 (file)
@@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned)
 /* used by various drivers */
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_DEBUG_SPINLOCK
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-#endif
-#else
 // XXX find what uses (or used) these.
 EXPORT_SYMBOL(___rw_read_enter);
 EXPORT_SYMBOL(___rw_read_exit);
index 2296ff9..fa50069 100644 (file)
@@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
         strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
         copy_user.o locks.o atomic.o atomic32.o bitops.o \
         lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
-
-lib-$(CONFIG_DEBUG_SPINLOCK) +=        debuglocks.o
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
deleted file mode 100644 (file)
index fb18235..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>     /* For NR_CPUS */
-#include <linux/spinlock.h>
-#include <asm/psr.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-/* Some notes on how these debugging routines work.  When a lock is acquired
- * an extra debugging member lock->owner_pc is set to the caller of the lock
- * acquisition routine.  Right before releasing a lock, the debugging program
- * counter is cleared to zero.
- *
- * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
- * number of the owner in the lowest two bits.
- */
-
-#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
-
-static inline void show(char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-
-       for(i = 0; i < NR_CPUS; i++)
-               printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
-
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(val) {
-               while(lock->lock) {
-                       if (!--stuck) {
-                               show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto again;
-       }
-       lock->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-int _spin_trylock(spinlock_t *lock)
-{
-       unsigned long val;
-       unsigned long caller;
-       int cpu = smp_processor_id();
-
-       STORE_CALLER(caller);
-
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(!val) {
-               /* We got it, record our identity for debugging. */
-               lock->owner_pc = (cpu & 3) | (caller & ~3);
-       }
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       barrier();
-       lock->lock = 0;
-}
-
-void _do_read_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = caller;
-       barrier();
-       rw->lock++;
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = 0;
-       barrier();
-       rw->lock -= 0x1ff;
-}
-
-void _do_write_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-wlock_wait:
-               while(rw->lock) {
-                       if (!--stuck) {
-                               show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       if (rw->lock & ~0xff) {
-               *(((unsigned char *)&rw->lock)+3) = 0;
-               barrier();
-               goto wlock_wait;
-       }
-
-       barrier();
-       rw->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-void _do_write_unlock(rwlock_t *rw)
-{
-       rw->owner_pc = 0;
-       barrier();
-       rw->lock = 0;
-}
-
-#endif /* SMP */
index 6625543..7d10b03 100644 (file)
@@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        struct thread_info *t = p->thread_info;
        char *child_trap_frame;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       p->thread.smp_lock_count = 0;
-       p->thread.smp_lock_pc = 0;
-#endif
-
        /* Calculate offset to stack_frame & pt_regs */
        child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
index 7d9a0f6..cbb5e59 100644 (file)
@@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
 /* used by various drivers */
 #ifdef CONFIG_SMP
-#ifndef CONFIG_DEBUG_SPINLOCK
 /* Out of line rw-locking implementation. */
 EXPORT_SYMBOL(__read_lock);
 EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
-EXPORT_SYMBOL(_raw_spin_lock_flags);
-#endif
 
 /* Hard IRQ locking */
 EXPORT_SYMBOL(synchronize_irq);
index 40dbeec..d968aeb 100644 (file)
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
         copy_in_user.o user_fixup.o memmove.o \
         mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644 (file)
index f5f0b55..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-static inline void show (char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->owner_pc, lock->owner_cpu);
-}
-
-static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->writer_pc, lock->writer_cpu);
-}
-
-static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08x\n",
-              str, lock, cpu, (unsigned int) caller);
-       printk("Writer: PC(%08x):CPU(%x)\n",
-              lock->writer_pc, lock->writer_cpu);
-       printk("Readers:");
-       for (i = 0; i < NR_CPUS; i++)
-               if (lock->reader_pc[i])
-                       printk(" %d[%08x]", i, lock->reader_pc[i]);
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (val) {
-               while (lock->lock) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto again;
-       }
-       lock->owner_pc = ((unsigned int)caller);
-       lock->owner_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (!val) {
-               lock->owner_pc = ((unsigned int)caller);
-               lock->owner_cpu = cpu;
-               current->thread.smp_lock_count++;
-               current->thread.smp_lock_pc = ((unsigned int)caller);
-       }
-
-       put_cpu();
-
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       lock->owner_cpu = NO_PROC_ID;
-       membar_storestore_loadstore();
-       lock->lock = 0;
-       current->thread.smp_lock_count--;
-}
-
-/* Keep INIT_STUCK the same... */
-
-void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Wait for any writer to go away.  */
-       while (((long)(rw->lock)) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-       /* Try once to increment the counter.  */
-       __asm__ __volatile__(
-"      ldx             [%0], %%g1\n"
-"      brlz,a,pn       %%g1, 2f\n"
-"       mov            1, %0\n"
-"      add             %%g1, 1, %%g7\n"
-"      casx            [%0], %%g1, %%g7\n"
-"      sub             %%g1, %%g7, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       membar_storeload_storestore();
-       if (val)
-               goto wlock_again;
-       rw->reader_pc[cpu] = ((unsigned int)caller);
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-       /* Drop our identity _first_. */
-       rw->reader_pc[cpu] = 0;
-       current->thread.smp_lock_count--;
-runlock_again:
-       /* Spin trying to decrement the counter using casx.  */
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      ldx     [%0], %%g1\n"
-"      sub     %%g1, 1, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto runlock_again;
-       }
-
-       put_cpu();
-}
-
-void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Spin while there is another writer. */
-       while (((long)rw->lock) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               /* We couldn't get the write bit. */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, spin, and try again.
-                */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-               while(rw->lock != 0) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto wlock_again;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_write_unlock(rwlock_t *rw, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int shown = 0;
-
-       /* Drop our identity _first_ */
-       rw->writer_pc = 0;
-       rw->writer_cpu = NO_PROC_ID;
-       current->thread.smp_lock_count--;
-wlock_again:
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      andn    %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write("write_unlock", rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-}
-
-int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-
-       if (val) {
-               put_cpu();
-               return 0;
-       }
-
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, return failure.
-                */
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-
-               put_cpu();
-
-               return 0;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-
-       return 1;
-}
-
-#endif /* CONFIG_SMP */
index 336cbf2..9e85969 100644 (file)
@@ -67,13 +67,13 @@ static void ack_none(unsigned int irq)
 #define end_none       enable_none
 
 struct hw_interrupt_type no_irq_type = {
-       "none",
-       startup_none,
-       shutdown_none,
-       enable_none,
-       disable_none,
-       ack_none,
-       end_none
+       .typename = "none",
+       .startup = startup_none,
+       .shutdown = shutdown_none,
+       .enable = enable_none,
+       .disable = disable_none,
+       .ack = ack_none,
+       .end = end_none
 };
 
 volatile unsigned long irq_err_count, spurious_count;
index abd4840..62bdb8d 100644 (file)
@@ -138,13 +138,13 @@ static void nmi_end (unsigned irq)
 }
 
 static struct hw_interrupt_type nmi_irq_type = {
-       "NMI",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       nmi_end,                /* end */
+       .typename = "NMI",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = nmi_end,         /* end */
 };
 
 void __init init_IRQ (void)
index e2cc558..17049aa 100644 (file)
@@ -73,13 +73,13 @@ static void irq_nop (unsigned irq) { }
 static unsigned irq_zero (unsigned irq) { return 0; }
 
 static struct hw_interrupt_type sim_irq_type = {
-       "IRQ",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       irq_nop,                /* end */
+       .typename = "IRQ",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = irq_nop,         /* end */
 };
 
 void __init mach_init_irqs (void)
index 0511d80..9aec524 100644 (file)
@@ -929,7 +929,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
        c->x86_num_cores = intel_num_cpu_cores(c);
 }
 
-void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
 
index 7249ba2..aee50b4 100644 (file)
@@ -23,7 +23,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static struct pm_ops acpi_pm_ops;
 
-extern void do_suspend_lowlevel_s4bios(void);
 extern void do_suspend_lowlevel(void);
 
 static u32 acpi_suspend_states[] = {
@@ -98,8 +97,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
        case PM_SUSPEND_DISK:
                if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
                        status = acpi_enter_sleep_state(acpi_state);
-               else
-                       do_suspend_lowlevel_s4bios();
                break;
        case PM_SUSPEND_MAX:
                acpi_power_off();
@@ -206,11 +203,6 @@ static int __init acpi_sleep_init(void)
                        printk(" S%d", i);
                }
                if (i == ACPI_STATE_S4) {
-                       if (acpi_gbl_FACS->S4bios_f) {
-                               sleep_states[i] = 1;
-                               printk(" S4bios");
-                               acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE;
-                       }
                        if (sleep_states[i])
                                acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
                }
index a5f947d..af7935a 100644 (file)
@@ -21,9 +21,7 @@ int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
        /* do we have a wakeup address for S2 and S3? */
-       /* Here, we support only S4BIOS, those we set the wakeup address */
-       /* S4OS is only supported for now via swsusp.. */
-       if (acpi_state == ACPI_STATE_S3 || acpi_state == ACPI_STATE_S4) {
+       if (acpi_state == ACPI_STATE_S3) {
                if (!acpi_wakeup_address) {
                        return -EFAULT;
                }
index 09a603f..4696a85 100644 (file)
@@ -25,8 +25,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
        for (i = 0; i <= ACPI_STATE_S5; i++) {
                if (sleep_states[i]) {
                        seq_printf(seq, "S%d ", i);
-                       if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f)
-                               seq_printf(seq, "S4bios ");
                }
        }
 
index c4aebf2..60a7ef6 100644 (file)
@@ -262,7 +262,8 @@ dma_pool_destroy (struct dma_pool *pool)
  * If such a memory block can't be allocated, null is returned.
  */
 void *
-dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
+               dma_addr_t *handle)
 {
        unsigned long           flags;
        struct dma_page         *page;
index 418b146..28f2c17 100644 (file)
@@ -1713,10 +1713,9 @@ static unsigned long pollcomplete(int ctlr)
 
        for (i = 20 * HZ; i > 0; i--) {
                done = hba[ctlr]->access.command_completed(hba[ctlr]);
-               if (done == FIFO_EMPTY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
-               } else
+               if (done == FIFO_EMPTY)
+                       schedule_timeout_uninterruptible(1);
+               else
                        return (done);
        }
        /* Invalid address to tell caller we ran out of time */
index 30c0903..cd056e7 100644 (file)
@@ -2260,6 +2260,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
        if (!atomic_dec_and_test(&cfqd->ref))
                return;
 
+       blk_put_queue(q);
+
        cfq_shutdown_timer_wq(cfqd);
        q->elevator->elevator_data = NULL;
 
@@ -2316,6 +2318,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        e->elevator_data = cfqd;
 
        cfqd->queue = q;
+       atomic_inc(&q->refcnt);
 
        cfqd->max_queued = q->nr_requests / 4;
        q->nr_batching = cfq_queued;
index 7289f67..ac5ba46 100644 (file)
@@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
 
 static void pcd_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pcd_reset(struct pcd_unit *cd)
index 060b1f2..711d2f3 100644 (file)
@@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf)
 
 static void pf_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 /* the ATAPI standard actually specifies the contents of all 7 registers
index 84d8e29..b398239 100644 (file)
@@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev)
 
 static void pg_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
index 5fe8ee8..d8d3523 100644 (file)
@@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *
 
 static void pt_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
index e5f7494..e425ad3 100644 (file)
@@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs)
                        break;
                }
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
                if (swim3_readbit(fs, DISK_IN) == 0)
                        break;
        }
@@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
                                break;
                        }
                        swim3_select(fs, RELAX);
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                }
                if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
                                 || swim3_readbit(fs, DISK_IN) == 0))
@@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk)
                if (signal_pending(current))
                        break;
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
                || swim3_readbit(fs, DISK_IN) == 0;
index a1283f6..89e3c2f 100644 (file)
@@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs)
                        err = -EINTR;
                        break;
                }
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        release_drive(fs);
        return cmd->error;
index 0c4c121..0f48301 100644 (file)
@@ -34,6 +34,7 @@
  *                      - set initialised bit then.
  */
 
+//#define DEBUG /* uncomment if you want debugging info (pr_debug) */
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-#define PRINTK(x...) do {} while (0)
-#define dprintk(x...) do {} while (0)
-/*#define dprintk(x...) printk(x) */
-
 #define MM_MAXCARDS 4
 #define MM_RAHEAD 2      /* two sectors */
 #define MM_BLKSIZE 1024  /* 1k blocks */
@@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card)
 
        /* make the last descriptor end the chain */
        page = &card->mm_pages[card->Active];
-       PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
+       pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
        desc = &page->desc[page->cnt-1];
 
        desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
@@ -532,7 +529,7 @@ static void process_page(unsigned long data)
                activate(card);
        } else {
                /* haven't finished with this one yet */
-               PRINTK("do some more\n");
+               pr_debug("do some more\n");
                mm_start_io(card);
        }
  out_unlock:
@@ -555,7 +552,7 @@ static void process_page(unsigned long data)
 static int mm_make_request(request_queue_t *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
-       PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
+       pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
 
        bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
        spin_lock_irq(&card->lock);
index 1676033..68b6d7b 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/wait.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/delay.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, };
 
 #define XD_DONT_USE_DMA                0  /* Initial value. may be overriden using
                                      "nodma" module option */
-#define XD_INIT_DISK_DELAY     (30*HZ/1000)  /* 30 ms delay during disk initialization */
+#define XD_INIT_DISK_DELAY     (30)  /* 30 ms delay during disk initialization */
 
 /* Above may need to be increased if a problem with the 2nd drive detection
    (ST11M controller) or resetting a controller (WD) appears */
@@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t
        int success;
 
        xdc_busy = 1;
-       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
+               schedule_timeout_uninterruptible(1);
        xdc_busy = 0;
        return (success);
 }
@@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
        for (i = 0; i < XD_MAXDRIVES; i++) {
                xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
                if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
 
                        init_drive(count);
                        count++;
 
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
                }
        }
        return (count);
@@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address)
 
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_wd_init_drive (u_char drive)
@@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */
        xd_maxsectors = 0x01;
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_xebec_init_drive (u_char drive)
index 007f6a6..bb5e8d6 100644 (file)
@@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp )
     return 0;
 
 err_out_kfree:
-    kfree( z2ram_map );
+    kfree(z2ram_map);
 err_out:
     return rc;
 }
index 30a8977..466e9c2 100644 (file)
@@ -827,8 +827,7 @@ static void mark_timeout_audio(u_long i)
 static void sbp_sleep(u_int time)
 {
        sti();
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(time);
+       schedule_timeout_interruptible(time);
        sti();
 }
 /*==========================================================================*/
@@ -4216,7 +4215,8 @@ static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd,
                
        case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
                msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
-               if (current_drive->sbp_audsiz>0) vfree(current_drive->aud_buf);
+               if (current_drive->sbp_audsiz>0)
+                       vfree(current_drive->aud_buf);
                current_drive->aud_buf=NULL;
                current_drive->sbp_audsiz=arg;
                
@@ -5910,7 +5910,8 @@ static void sbpcd_exit(void)
                put_disk(D_S[j].disk);
                devfs_remove("sbp/c0t%d", j);
                vfree(D_S[j].sbp_buf);
-               if (D_S[j].sbp_audsiz>0) vfree(D_S[j].aud_buf);
+               if (D_S[j].sbp_audsiz>0)
+                       vfree(D_S[j].aud_buf);
                if ((unregister_cdrom(D_S[j].sbpcd_infop) == -EINVAL))
                {
                        msg(DBG_INF, "What's that: can't unregister info %s.\n", major_name);
index 9f22e8f..e656599 100644 (file)
@@ -1478,8 +1478,7 @@ static int __init sony535_init(void)
        /* look for the CD-ROM, follows the procedure in the DOS driver */
        inb(select_unit_reg);
        /* wait for 40 18 Hz ticks (reverse-engineered from DOS driver) */
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout((HZ+17)*40/18);
+       schedule_timeout_interruptible((HZ+17)*40/18);
        inb(result_reg);
 
        outb(0, read_status_reg);       /* does a reset? */
index 4d4e602..82b43c5 100644 (file)
@@ -206,10 +206,9 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
                bridge->driver->cleanup();
        if (bridge->driver->free_gatt_table)
                bridge->driver->free_gatt_table(bridge);
-       if (bridge->key_list) {
-               vfree(bridge->key_list);
-               bridge->key_list = NULL;
-       }
+
+       vfree(bridge->key_list);
+       bridge->key_list = NULL;
 
        if (bridge->driver->agp_destroy_page &&
            bridge->driver->needs_scratch_page)
index 11f9ee5..927a5bb 100644 (file)
@@ -172,7 +172,7 @@ static int ac_register_board(unsigned long physloc, void __iomem *loc,
 
 void cleanup_module(void)
 {
-       int i;
+       unsigned int i;
 
        misc_deregister(&ac_miscdev);
 
@@ -195,7 +195,7 @@ int __init applicom_init(void)
        int i, numisa = 0;
        struct pci_dev *dev = NULL;
        void __iomem *RamIO;
-       int boardno;
+       int boardno, ret;
 
        printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
 
@@ -294,7 +294,8 @@ int __init applicom_init(void)
        }
 
        if (!numisa)
-               printk(KERN_WARNING"ac.o: No valid ISA Applicom boards found at mem 0x%lx\n",mem);
+               printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+                               "at mem 0x%lx\n", mem);
 
  fin:
        init_waitqueue_head(&FlagSleepRec);
@@ -304,7 +305,11 @@ int __init applicom_init(void)
        DeviceErrorCount = 0;
 
        if (numboards) {
-               misc_register(&ac_miscdev);
+               ret = misc_register(&ac_miscdev);
+               if (ret) {
+                       printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+                       goto out;
+               }
                for (i = 0; i < MAX_BOARD; i++) {
                        int serial;
                        char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
@@ -337,6 +342,17 @@ int __init applicom_init(void)
 
        else
                return -ENXIO;
+
+out:
+       for (i = 0; i < MAX_BOARD; i++) {
+               if (!apbs[i].RamIO)
+                       continue;
+               if (apbs[i].irq)
+                       free_irq(apbs[i].irq, &dummy);
+               iounmap(apbs[i].RamIO);
+       }
+       pci_disable_device(dev);
+       return ret;
 }
 
 
index 1704a2a..b2e0928 100644 (file)
@@ -387,10 +387,8 @@ int fdc_interrupt_wait(unsigned int time)
 
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftape_wait_intr, &wait);
-       while (!ft_interrupt_seen && timeout) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-        }
+       while (!ft_interrupt_seen && timeout)
+               timeout = schedule_timeout_interruptible(timeout);
 
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = old_sigmask;
index 5fe8461..de0379b 100644 (file)
@@ -100,14 +100,14 @@ static struct hpets *hpets;
 #endif
 
 #ifndef readq
-static unsigned long long __inline readq(void __iomem *addr)
+static inline unsigned long long readq(void __iomem *addr)
 {
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
 }
 #endif
 
 #ifndef writeq
-static void __inline writeq(unsigned long long v, void __iomem *addr)
+static inline void writeq(unsigned long long v, void __iomem *addr)
 {
        writel(v & 0xffffffff, addr);
        writel(v >> 32, addr + 4);
index 3480535..6f673d2 100644 (file)
@@ -513,10 +513,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
                        return ret ? : -EAGAIN;
 
                if(need_resched())
-               {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
-               }
+                       schedule_timeout_interruptible(1);
                else
                        udelay(200);    /* FIXME: We could poll for 250uS ?? */
 
index 82c5f30..ba85eb1 100644 (file)
@@ -655,8 +655,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
                        timeout--;   // So negative values == forever
                
                if (!in_interrupt()) {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);    // short nap 
+                       schedule_timeout_interruptible(1);      // short nap
                } else {
                        // we cannot sched/sleep in interrrupt silly
                        return 0;   
@@ -1132,8 +1131,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
 
                                        ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );
 
-                                       current->state = TASK_INTERRUPTIBLE;
-                                       schedule_timeout(2);
+                                       schedule_timeout_interruptible(2);
                                        if (signal_pending(current)) {
                                                break;
                                        }
index 278f841..b6e5cbf 100644 (file)
@@ -1920,8 +1920,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
        for (;;)
        {
                if (smi_result == SI_SM_CALL_WITH_DELAY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
                }
@@ -2256,10 +2255,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! new_smi->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!new_smi->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
  out_err:
        if (new_smi->intf)
@@ -2379,17 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! to_clean->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!to_clean->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
        /* Interrupts and timeouts are stopped, now make sure the
           interface is in a clean state. */
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
        }
 
        rv = ipmi_unregister_smi(to_clean->intf);
diff --git