Merge master.kernel.org:/pub/scm/linux/kernel/git/dtor/input
author Linus Torvalds <torvalds@g5.osdl.org>
Sat, 10 Sep 2005 22:54:41 +0000 (15:54 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sat, 10 Sep 2005 22:54:41 +0000 (15:54 -0700)
288 files changed:
COPYING
Documentation/00-INDEX
Documentation/CodingStyle
Documentation/DMA-API.txt
Documentation/DocBook/journal-api.tmpl
Documentation/DocBook/usb.tmpl
Documentation/MSI-HOWTO.txt
Documentation/RCU/RTFP.txt
Documentation/RCU/UP.txt
Documentation/RCU/checklist.txt
Documentation/RCU/rcu.txt
Documentation/RCU/whatisRCU.txt [new file with mode: 0644]
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/cpusets.txt
Documentation/crypto/descore-readme.txt
Documentation/feature-removal-schedule.txt
Documentation/ioctl/cdrom.txt
Documentation/mono.txt
Documentation/networking/bonding.txt
Documentation/networking/wan-router.txt
Documentation/pci.txt
Documentation/powerpc/eeh-pci-error-recovery.txt
Documentation/s390/s390dbf.txt
Documentation/scsi/ibmmca.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/sysrq.txt
Documentation/uml/UserModeLinux-HOWTO.txt
Documentation/usb/gadget_serial.txt
Documentation/video4linux/Zoran
Kbuild
Makefile
REPORTING-BUGS
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/smp.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/common/scoop.c
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/mach-pxa/corgi.c
arch/i386/kernel/acpi/wakeup.S
arch/i386/kernel/cpu/common.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/vmlinux.lds.S
arch/ia64/kernel/mca.c
arch/m32r/Kconfig
arch/m32r/kernel/smp.c
arch/mips/kernel/irixsig.c
arch/mips/kernel/sysirix.c
arch/mips/lib/dec_and_lock.c
arch/parisc/lib/Makefile
arch/parisc/lib/bitops.c
arch/parisc/lib/debuglocks.c [deleted file]
arch/ppc/Kconfig
arch/ppc/Makefile
arch/ppc/kernel/cpu_setup_6xx.S
arch/ppc/kernel/cpu_setup_power4.S
arch/ppc/kernel/dma-mapping.c
arch/ppc/kernel/head.S
arch/ppc/kernel/idle.c
arch/ppc/kernel/smp.c
arch/ppc/kernel/traps.c
arch/ppc/lib/Makefile
arch/ppc/lib/dec_and_lock.c
arch/ppc/mm/fault.c
arch/ppc/platforms/pmac_sleep.S
arch/ppc/platforms/pmac_smp.c
arch/ppc/syslib/cpc700_pic.c
arch/ppc/syslib/i8259.c
arch/ppc/syslib/open_pic2.c
arch/ppc/syslib/ppc403_pic.c
arch/ppc/syslib/xilinx_pic.c
arch/ppc64/Makefile
arch/ppc64/kernel/cpu_setup_power4.S
arch/ppc64/lib/dec_and_lock.c
arch/ppc64/lib/locks.c
arch/s390/lib/spinlock.c
arch/sh/boards/adx/irq_maskreg.c
arch/sh/boards/bigsur/io.c
arch/sh/boards/bigsur/irq.c
arch/sh/boards/cqreek/irq.c
arch/sh/boards/harp/irq.c
arch/sh/boards/overdrive/irq.c
arch/sh/boards/renesas/hs7751rvoip/irq.c
arch/sh/boards/renesas/rts7751r2d/irq.c
arch/sh/boards/renesas/systemh/irq.c
arch/sh/boards/superh/microdev/irq.c
arch/sh/cchips/hd6446x/hd64465/io.c
arch/sh/cchips/voyagergx/irq.c
arch/sh/kernel/cpu/irq_imask.c
arch/sh/kernel/cpu/irq_ipr.c
arch/sh/kernel/cpu/sh4/irq_intc2.c
arch/sh64/kernel/irq_intc.c
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/lib/Makefile
arch/sparc/lib/debuglocks.c [deleted file]
arch/sparc64/kernel/process.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/lib/Makefile
arch/sparc64/lib/debuglocks.c [deleted file]
arch/um/Makefile
arch/um/include/mem.h
arch/um/kernel/dyn.lds.S
arch/um/kernel/ksyms.c
arch/um/kernel/physmem.c
arch/um/kernel/trap_kern.c
arch/um/kernel/uml.lds.S
arch/v850/kernel/irq.c
arch/v850/kernel/setup.c
arch/v850/kernel/sim.c
arch/x86_64/kernel/setup.c
arch/x86_64/kernel/vmlinux.lds.S
drivers/acpi/sleep/main.c
drivers/acpi/sleep/poweroff.c
drivers/acpi/sleep/proc.c
drivers/base/dmapool.c
drivers/block/cciss.c
drivers/block/cfq-iosched.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pt.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/umem.c
drivers/block/xd.c
drivers/block/z2ram.c
drivers/cdrom/sbpcd.c
drivers/cdrom/sonycd535.c
drivers/char/agp/backend.c
drivers/char/applicom.c
drivers/char/ftape/lowlevel/fdc-io.c
drivers/char/hpet.c
drivers/char/hw_random.c
drivers/char/ip2/i2lib.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/lcd.c
drivers/char/lp.c
drivers/char/mxser.c
drivers/char/n_tty.c
drivers/char/pcmcia/synclink_cs.c
drivers/ide/ide-io.c
drivers/ide/ide-tape.c
drivers/ide/ide-timing.h
drivers/ide/legacy/ide-cs.c
drivers/isdn/i4l/isdn_bsdcomp.c
drivers/isdn/i4l/isdn_common.c
drivers/md/dm-exception-store.c
drivers/md/md.c
drivers/media/common/saa7146_core.c
drivers/media/video/cpia_usb.c
drivers/media/video/stradis.c
drivers/media/video/video-buf.c
drivers/media/video/zoran_driver.c
drivers/media/video/zr36120.c
drivers/misc/hdpuftrs/hdpu_cpustate.c
drivers/mtd/devices/mtdram.c
drivers/mtd/ftl.c
drivers/net/bsd_comp.c
drivers/net/ppp_generic.c
drivers/net/tulip/de4x5.c
drivers/parisc/lasi.c
drivers/parport/ieee1284.c
drivers/parport/ieee1284_ops.c
drivers/parport/parport_pc.c
drivers/pci/pci-sysfs.c
drivers/pci/probe.c
drivers/sbus/char/bbc_envctrl.c
drivers/sbus/char/envctrl.c
drivers/scsi/53c7xx.c
drivers/scsi/ch.c
drivers/scsi/cpqfcTSinit.c
drivers/scsi/ibmmca.c
drivers/scsi/osst.c
drivers/serial/8250.c
drivers/telephony/ixj.c
drivers/usb/media/stv680.c
drivers/video/vgastate.c
fs/buffer.c
fs/cifs/connect.c
fs/cramfs/uncompress.c
fs/dcache.c
fs/jbd/transaction.c
fs/jffs/intrep.c
fs/lockd/clntproc.c
fs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/ntfs/aops.c
fs/pipe.c
fs/reiserfs/journal.c
fs/reiserfs/super.c
fs/smbfs/proc.c
fs/xfs/linux-2.6/time.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
include/asm-alpha/spinlock.h
include/asm-alpha/spinlock_types.h [new file with mode: 0644]
include/asm-arm/spinlock.h
include/asm-arm/spinlock_types.h [new file with mode: 0644]
include/asm-arm/unistd.h
include/asm-arm26/hardirq.h
include/asm-generic/vmlinux.lds.h
include/asm-i386/div64.h
include/asm-i386/processor.h
include/asm-i386/spinlock.h
include/asm-i386/spinlock_types.h [new file with mode: 0644]
include/asm-ia64/spinlock.h
include/asm-ia64/spinlock_types.h [new file with mode: 0644]
include/asm-m32r/spinlock.h
include/asm-m32r/spinlock_types.h [new file with mode: 0644]
include/asm-mips/spinlock.h
include/asm-mips/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/atomic.h
include/asm-parisc/bitops.h
include/asm-parisc/cacheflush.h
include/asm-parisc/processor.h
include/asm-parisc/spinlock.h
include/asm-parisc/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/system.h
include/asm-ppc/smp.h
include/asm-ppc/spinlock.h
include/asm-ppc/spinlock_types.h [new file with mode: 0644]
include/asm-ppc/system.h
include/asm-ppc64/spinlock.h
include/asm-ppc64/spinlock_types.h [new file with mode: 0644]
include/asm-s390/spinlock.h
include/asm-s390/spinlock_types.h [new file with mode: 0644]
include/asm-sh/spinlock.h
include/asm-sh/spinlock_types.h [new file with mode: 0644]
include/asm-sparc/spinlock.h
include/asm-sparc/spinlock_types.h [new file with mode: 0644]
include/asm-sparc64/spinlock.h
include/asm-sparc64/spinlock_types.h [new file with mode: 0644]
include/asm-um/page.h
include/asm-um/pgtable.h
include/asm-x86_64/proto.h
include/asm-x86_64/spinlock.h
include/asm-x86_64/spinlock_types.h [new file with mode: 0644]
include/linux/bio.h
include/linux/bit_spinlock.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/chio.h
include/linux/dmapool.h
include/linux/fs.h
include/linux/jbd.h
include/linux/jiffies.h
include/linux/radix-tree.h
include/linux/reiserfs_fs.h
include/linux/sched.h
include/linux/slab.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h [new file with mode: 0644]
include/linux/spinlock_api_up.h [new file with mode: 0644]
include/linux/spinlock_types.h [new file with mode: 0644]
include/linux/spinlock_types_up.h [new file with mode: 0644]
include/linux/spinlock_up.h [new file with mode: 0644]
include/linux/time.h
include/linux/writeback.h
ipc/mqueue.c
kernel/Makefile
kernel/acct.c
kernel/compat.c
kernel/cpuset.c
kernel/sched.c
kernel/signal.c
kernel/spinlock.c
kernel/timer.c
lib/Makefile
lib/dec_and_lock.c
lib/kernel_lock.c
lib/radix-tree.c
lib/sort.c
lib/spinlock_debug.c [new file with mode: 0644]
mm/filemap.c
mm/memory.c
mm/oom_kill.c
mm/page_alloc.c
mm/slab.c
mm/swap_state.c
mm/swapfile.c
scripts/Kbuild.include
scripts/reference_discarded.pl
sound/isa/sb/sb16_csp.c
sound/oss/skeleton.c [deleted file]

diff --git a/COPYING b/COPYING
index 2a7e338..ca442d3 100644 (file)
--- a/COPYING
+++ b/COPYING
@@ -18,7 +18,7 @@
                       Version 2, June 1991
 
  Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
@@ -321,7 +321,7 @@ the "copyright" line and a pointer to where the full notice is found.
 
     You should have received a copy of the GNU General Public License
     along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 
 Also add information on how to contact you by electronic and paper mail.
index f6de52b..433cf5e 100644 (file)
@@ -277,7 +277,7 @@ tty.txt
 unicode.txt
        - info on the Unicode character/font mapping used in Linux.
 uml/
-       - directory with infomation about User Mode Linux.
+       - directory with information about User Mode Linux.
 usb/
        - directory with info regarding the Universal Serial Bus.
 video4linux/
index f25b395..22e5f90 100644 (file)
@@ -236,6 +236,9 @@ ugly), but try to avoid excess.  Instead, put the comments at the head
 of the function, telling people what it does, and possibly WHY it does
 it.
 
+When commenting the kernel API functions, please use the kerneldoc format.
+See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
+for details.
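+
+A minimal sketch of the format (the function and parameter names here
+are illustrative only):
+
+        /**
+         * foobar_init - initialize a foobar device
+         * @dev: device to be initialized
+         *
+         * Returns zero on success or a negative errno on failure.
+         */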
 
                Chapter 8: You've made a mess of it
 
index 6ee3cd6..1af0f2d 100644 (file)
@@ -121,7 +121,7 @@ pool's device.
                        dma_addr_t addr);
 
 This puts memory back into the pool.  The pool is what was passed to
-the the pool allocation routine; the cpu and dma addresses are what
+the pool allocation routine; the cpu and dma addresses are what
 were returned when that routine allocated the memory being freed.
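+
+For example, a typical call might look like this (variable names are
+illustrative only):
+
+        dma_pool_free(pool, cpu_addr, dma_handle);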
 
 
index 1ef6f43..341aaa4 100644 (file)
@@ -116,7 +116,7 @@ filesystem. Almost.
 
 You still need to actually journal your filesystem changes, this
 is done by wrapping them into transactions. Additionally you
-also need to wrap the modification of each of the the buffers
+also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
 you are actually making are. To do this use  journal_start() which
 returns a transaction handle.
@@ -128,7 +128,7 @@ and its counterpart journal_stop(), which indicates the end of a transaction
 are nestable calls, so you can reenter a transaction if necessary,
 but remember you must call journal_stop() the same number of times as
 journal_start() before the transaction is completed (or more accurately
-leaves the the update phase). Ext3/VFS makes use of this feature to simplify 
+leaves the update phase). Ext3/VFS makes use of this feature to simplify
 quota support.
 </para>
 
index f3ef0bf..705c442 100644 (file)
@@ -841,7 +841,7 @@ usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
                    File modification time is not updated by this request.
                    </para><para>
                    Those struct members are from some interface descriptor
-                   applying to the the current configuration.
+                   applying to the current configuration.
                    The interface number is the bInterfaceNumber value, and
                    the altsetting number is the bAlternateSetting value.
                    (This resets each endpoint in the interface.)
index d5032eb..63edc5f 100644 (file)
@@ -430,7 +430,7 @@ which may result in system hang. The software driver of specific
 MSI-capable hardware is responsible for whether calling
 pci_enable_msi or not. A return of zero indicates the kernel
 successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running on MSI/MSI-X mode.
 
 5.6 How to tell whether MSI/MSI-X is enabled on device function
 
index 9c6d450..fcbcbc3 100644 (file)
@@ -2,7 +2,8 @@ Read the F-ing Papers!
 
 
 This document describes RCU-related publications, and is followed by
-the corresponding bibtex entries.
+the corresponding bibtex entries.  A number of the publications may
+be found at http://www.rdrop.com/users/paulmck/RCU/.
 
 The first thing resembling RCU was published in 1980, when Kung and Lehman
 [Kung80] recommended use of a garbage collector to defer destruction
@@ -113,6 +114,10 @@ describing how to make RCU safe for soft-realtime applications [Sarma04c],
 and a paper describing SELinux performance with RCU [JamesMorris04b].
 
 
+2005 has seen further adaptation of RCU to realtime use, permitting
+preemption of RCU realtime critical sections [PaulMcKenney05a,
+PaulMcKenney05b].
+
 Bibtex Entries
 
 @article{Kung80
@@ -410,3 +415,32 @@ Oregon Health and Sciences University"
 \url{http://www.livejournal.com/users/james_morris/2153.html}
 [Viewed December 10, 2004]"
 }
+
+@unpublished{PaulMcKenney05a
+,Author="Paul E. McKenney"
+,Title="{[RFC]} {RCU} and {CONFIG\_PREEMPT\_RT} progress"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/5/9/185}
+[Viewed May 13, 2005]"
+,annotation="
+       First publication of working lock-based deferred free patches
+       for the CONFIG_PREEMPT_RT environment.
+"
+}
+
+@conference{PaulMcKenney05b
+,Author="Paul E. McKenney and Dipankar Sarma"
+,Title="Towards Hard Realtime Response from the Linux Kernel on SMP Hardware"
+,Booktitle="linux.conf.au 2005"
+,month="April"
+,year="2005"
+,address="Canberra, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf}
+[Viewed May 13, 2005]"
+,annotation="
+       Realtime turns into making RCU yet more realtime friendly.
+"
+}
index 3bfb84b..aab4a9e 100644 (file)
@@ -8,7 +8,7 @@ is that since there is only one CPU, it should not be necessary to
 wait for anything else to get done, since there are no other CPUs for
 anything else to be happening on.  Although this approach will -sort- -of-
 work a surprising amount of the time, it is a very bad idea in general.
-This document presents two examples that demonstrate exactly how bad an
+This document presents three examples that demonstrate exactly how bad an
 idea this is.
 
 
@@ -26,6 +26,9 @@ from softirq, the list scan would find itself referencing a newly freed
 element B.  This situation can greatly decrease the life expectancy of
 your kernel.
 
+This same problem can occur if call_rcu() is invoked from a hardware
+interrupt handler.
+
 
 Example 2: Function-Call Fatality
 
@@ -44,8 +47,37 @@ its arguments would cause it to fail to make the fundamental guarantee
 underlying RCU, namely that call_rcu() defers invoking its arguments until
 all RCU read-side critical sections currently executing have completed.
 
-Quick Quiz: why is it -not- legal to invoke synchronize_rcu() in
-this case?
+Quick Quiz #1: why is it -not- legal to invoke synchronize_rcu() in
+       this case?
+
+
+Example 3: Death by Deadlock
+
+Suppose that call_rcu() is invoked while holding a lock, and that the
+callback function must acquire this same lock.  In this case, if
+call_rcu() were to directly invoke the callback, the result would
+be self-deadlock.
+
+In some cases, it would be possible to restructure the code so that
+the call_rcu() is delayed until after the lock is released.  However,
+there are cases where this can be quite ugly:
+
+1.     If a number of items need to be passed to call_rcu() within
+       the same critical section, then the code would need to create
+       a list of them, then traverse the list once the lock was
+       released.
+
+2.     In some cases, the lock will be held across some kernel API,
+       so that delaying the call_rcu() until the lock is released
+       requires that the data item be passed up via a common API.
+       It is far better to guarantee that callbacks are invoked
+       with no locks held than to have to modify such APIs to allow
+       arbitrary data items to be passed back up through them.
+
+If call_rcu() directly invoked the callback, painful locking restrictions
+or API changes would be required.
+
+Quick Quiz #2: What locking restriction must RCU callbacks respect?
 
 
 Summary
@@ -53,12 +85,35 @@ Summary
 Permitting call_rcu() to immediately invoke its arguments or permitting
 synchronize_rcu() to immediately return breaks RCU, even on a UP system.
 So do not do it!  Even on a UP system, the RCU infrastructure -must-
-respect grace periods.
-
-
-Answer to Quick Quiz
-
-The calling function is scanning an RCU-protected linked list, and
-is therefore within an RCU read-side critical section.  Therefore,
-the called function has been invoked within an RCU read-side critical
-section, and is not permitted to block.
+respect grace periods, and -must- invoke callbacks from a known environment
+in which no locks are held.
+
+
+Answer to Quick Quiz #1:
+       Why is it -not- legal to invoke synchronize_rcu() in this case?
+
+       Because the calling function is scanning an RCU-protected linked
+       list, and is therefore within an RCU read-side critical section.
+       Therefore, the called function has been invoked within an RCU
+       read-side critical section, and is not permitted to block.
+
+Answer to Quick Quiz #2:
+       What locking restriction must RCU callbacks respect?
+
+       Any lock that is acquired within an RCU callback must be
+       acquired elsewhere using an _irq variant of the spinlock
+       primitive.  For example, if "mylock" is acquired by an
+       RCU callback, then a process-context acquisition of this
+       lock must use something like spin_lock_irqsave() to
+       acquire the lock.
+
+       If the process-context code were to simply use spin_lock(),
+       then, since RCU callbacks can be invoked from softirq context,
+       the callback might be called from a softirq that interrupted
+       the process-context critical section.  This would result in
+       self-deadlock.
+
+       This restriction might seem gratuitous, since very few RCU
+       callbacks acquire locks directly.  However, a great many RCU
+       callbacks do acquire locks -indirectly-, for example, via
+       the kfree() primitive.
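+
+       For example (a sketch; "mylock" is an illustrative name),
+       process-context code sharing a lock with an RCU callback
+       might do:
+
+               spin_lock_irqsave(&mylock, flags);
+               /* ... critical section ... */
+               spin_unlock_irqrestore(&mylock, flags);
+
+       while the RCU callback itself may then safely use plain
+       spin_lock(&mylock) and spin_unlock(&mylock).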
index 8f3fb77..e118a7c 100644 (file)
@@ -43,6 +43,10 @@ over a rather long period of time, but improvements are always welcome!
        rcu_read_lock_bh()) in the read-side critical sections,
        and are also an excellent aid to readability.
 
+       As a rough rule of thumb, any dereference of an RCU-protected
+       pointer must be covered by rcu_read_lock() or rcu_read_lock_bh()
+       or by the appropriate update-side lock.
+
 3.     Does the update code tolerate concurrent accesses?
 
        The whole point of RCU is to permit readers to run without
@@ -90,7 +94,11 @@ over a rather long period of time, but improvements are always welcome!
 
                The rcu_dereference() primitive is used by the various
                "_rcu()" list-traversal primitives, such as the
-               list_for_each_entry_rcu().
+               list_for_each_entry_rcu().  Note that it is perfectly
+               legal (if redundant) for update-side code to use
+               rcu_dereference() and the "_rcu()" list-traversal
+               primitives.  This is particularly useful in code
+               that is common to readers and updaters.
 
        b.      If the list macros are being used, the list_add_tail_rcu()
                and list_add_rcu() primitives must be used in order
@@ -150,16 +158,9 @@ over a rather long period of time, but improvements are always welcome!
 
        Use of the _rcu() list-traversal primitives outside of an
        RCU read-side critical section causes no harm other than
-       a slight performance degradation on Alpha CPUs and some
-       confusion on the part of people trying to read the code.
-
-       Another way of thinking of this is "If you are holding the
-       lock that prevents the data structure from changing, why do
-       you also need RCU-based protection?"  That said, there may
-       well be situations where use of the _rcu() list-traversal
-       primitives while the update-side lock is held results in
-       simpler and more maintainable code.  The jury is still out
-       on this question.
+       a slight performance degradation on Alpha CPUs.  It can
+       also be quite helpful in reducing code bloat when common
+       code is shared between readers and updaters.
 
 10.    Conversely, if you are in an RCU read-side critical section,
        you -must- use the "_rcu()" variants of the list macros.
index eb44400..6fa0922 100644 (file)
@@ -64,6 +64,54 @@ o    I hear that RCU is patented?  What is with that?
        Of these, one was allowed to lapse by the assignee, and the
        others have been contributed to the Linux kernel under GPL.
 
+o      I hear that RCU needs work in order to support realtime kernels?
+
+       Yes, work in progress.
+
 o      Where can I find more information on RCU?
 
        See the RTFP.txt file in this directory.
+       Or point your browser at http://www.rdrop.com/users/paulmck/RCU/.
+
+o      What are all these files in this directory?
+
+
+       NMI-RCU.txt
+
+               Describes how to use RCU to implement dynamic
+               NMI handlers, which can be revectored on the fly,
+               without rebooting.
+
+       RTFP.txt
+
+               List of RCU-related publications and web sites.
+
+       UP.txt
+
+               Discussion of RCU usage in UP kernels.
+
+       arrayRCU.txt
+
+               Describes how to use RCU to protect arrays, with
+               resizeable arrays whose elements reference other
+               data structures being of the most interest.
+
+       checklist.txt
+
+               Lists things to check for when inspecting code that
+               uses RCU.
+
+       listRCU.txt
+
+               Describes how to use RCU to protect linked lists.
+               This is the simplest and most common use of RCU
+               in the Linux kernel.
+
+       rcu.txt
+
+               You are reading it!
+
+       whatisRCU.txt
+
+               Overview of how the RCU implementation works.  Along
+               the way, presents a conceptual view of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
new file mode 100644 (file)
index 0000000..354d89c
--- /dev/null
@@ -0,0 +1,902 @@
+What is RCU?
+
+RCU is a synchronization mechanism that was added to the Linux kernel
+during the 2.5 development effort that is optimized for read-mostly
+situations.  Although RCU is actually quite simple once you understand it,
+getting there can sometimes be a challenge.  Part of the problem is that
+most of the past descriptions of RCU have been written with the mistaken
+assumption that there is "one true way" to describe RCU.  Instead,
+the experience has been that different people must take different paths
+to arrive at an understanding of RCU.  This document provides several
+different paths, as follows:
+
+1.     RCU OVERVIEW
+2.     WHAT IS RCU'S CORE API?
+3.     WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+4.     WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+5.     WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+6.     ANALOGY WITH READER-WRITER LOCKING
+7.     FULL LIST OF RCU APIs
+8.     ANSWERS TO QUICK QUIZZES
+
+People who prefer starting with a conceptual overview should focus on
+Section 1, though most readers will profit by reading this section at
+some point.  People who prefer to start with an API that they can then
+experiment with should focus on Section 2.  People who prefer to start
+with example uses should focus on Sections 3 and 4.  People who need to
+understand the RCU implementation should focus on Section 5, then dive
+into the kernel source code.  People who reason best by analogy should
+focus on Section 6.  Section 7 serves as an index to the docbook API
+documentation, and Section 8 is the traditional answer key.
+
+So, start with the section that makes the most sense to you and your
+preferred method of learning.  If you need to know everything about
+everything, feel free to read the whole thing -- but if you are really
+that type of person, you have perused the source code and will therefore
+never need this document anyway.  ;-)
+
+
+1.  RCU OVERVIEW
+
+The basic idea behind RCU is to split updates into "removal" and
+"reclamation" phases.  The removal phase removes references to data items
+within a data structure (possibly by replacing them with references to
+new versions of these data items), and can run concurrently with readers.
+The reason that it is safe to run the removal phase concurrently with
+readers is that the semantics of modern CPUs guarantee that readers see
+either the old or the new version of the data structure rather than a
+partially updated reference.  The reclamation phase does the work of reclaiming
+(e.g., freeing) the data items removed from the data structure during the
+removal phase.  Because reclaiming data items can disrupt any readers
+concurrently referencing those data items, the reclamation phase must
+not start until readers no longer hold references to those data items.
+
+Splitting the update into removal and reclamation phases permits the
+updater to perform the removal phase immediately, and to defer the
+reclamation phase until all readers active during the removal phase have
+completed, either by blocking until they finish or by registering a
+callback that is invoked after they finish.  Only readers that are active
+during the removal phase need be considered, because any reader starting
+after the removal phase will be unable to gain a reference to the removed
+data items, and therefore cannot be disrupted by the reclamation phase.
+
+So the typical RCU update sequence goes something like the following:
+
+a.     Remove pointers to a data structure, so that subsequent
+       readers cannot gain a reference to it.
+
+b.     Wait for all previous readers to complete their RCU read-side
+       critical sections.
+
+c.     At this point, there cannot be any readers who hold references
+       to the data structure, so it now may safely be reclaimed
+       (e.g., kfree()d).
+
+Step (b) above is the key idea underlying RCU's deferred destruction.
+The ability to wait until all readers are done allows RCU readers to
+use much lighter-weight synchronization, in some cases, absolutely no
+synchronization at all.  In contrast, in more conventional lock-based
+schemes, readers must use heavy-weight synchronization in order to
+prevent an updater from deleting the data structure out from under them.
+This is because lock-based updaters typically update data items in place,
+and must therefore exclude readers.  In contrast, RCU-based updaters
+typically take advantage of the fact that writes to single aligned
+pointers are atomic on modern CPUs, allowing atomic insertion, removal,
+and replacement of data items in a linked structure without disrupting
+readers.  Concurrent RCU readers can then continue accessing the old
+versions, and can dispense with the atomic operations, memory barriers,
+and communications cache misses that are so expensive on present-day
+SMP computer systems, even in the absence of lock contention.
+
+In the three-step procedure shown above, the updater is performing both
+the removal and the reclamation step, but it is often helpful for an
+entirely different thread to do the reclamation, as is in fact the case
+in the Linux kernel's directory-entry cache (dcache).  Even if the same
+thread performs both the update step (step (a) above) and the reclamation
+step (step (c) above), it is often helpful to think of them separately.
+For example, RCU readers and updaters need not communicate at all,
+but RCU provides implicit low-overhead communication between readers
+and reclaimers, namely, in step (b) above.
+
+So how the heck can a reclaimer tell when a reader is done, given
+that readers are not doing any sort of synchronization operations???
+Read on to learn about how RCU's API makes this easy.
+
+
+2.  WHAT IS RCU'S CORE API?
+
+The core RCU API is quite small:
+
+a.     rcu_read_lock()
+b.     rcu_read_unlock()
+c.     synchronize_rcu() / call_rcu()
+d.     rcu_assign_pointer()
+e.     rcu_dereference()
+
+There are many other members of the RCU API, but the rest can be
+expressed in terms of these five, though most implementations instead
+express synchronize_rcu() in terms of the call_rcu() callback API.
+
+The five core RCU APIs are described below; the other 18 will be enumerated
+later.  See the kernel docbook documentation for more info, or look directly
+at the function header comments.
+
+rcu_read_lock()
+
+       void rcu_read_lock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       entering an RCU read-side critical section.  It is illegal
+       to block while in an RCU read-side critical section, though
+       kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
+       critical sections.  Any RCU-protected data structure accessed
+       during an RCU read-side critical section is guaranteed to remain
+       unreclaimed for the full duration of that critical section.
+       Reference counts may be used in conjunction with RCU to maintain
+       longer-term references to data structures.
+
+rcu_read_unlock()
+
+       void rcu_read_unlock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       exiting an RCU read-side critical section.  Note that RCU
+       read-side critical sections may be nested and/or overlapping.
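+
+       For example, the following nesting is legal (a sketch):
+
+               rcu_read_lock();        /* outer critical section */
+               rcu_read_lock();        /* nested critical section */
+               /* ... read-side accesses ... */
+               rcu_read_unlock();
+               rcu_read_unlock();      /* outer section ends here */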
+
+synchronize_rcu()
+
+       void synchronize_rcu(void);
+
+       Marks the end of updater code and the beginning of reclaimer
+       code.  It does this by blocking until all pre-existing RCU
+       read-side critical sections on all CPUs have completed.
+       Note that synchronize_rcu() will -not- necessarily wait for
+       any subsequent RCU read-side critical sections to complete.
+       For example, consider the following sequence of events:
+
+                CPU 0                  CPU 1                 CPU 2
+            ----------------- ------------------------- ---------------
+        1.  rcu_read_lock()
+        2.                    enters synchronize_rcu()
+        3.                                               rcu_read_lock()
+        4.  rcu_read_unlock()
+        5.                     exits synchronize_rcu()
+        6.                                              rcu_read_unlock()
+
+       To reiterate, synchronize_rcu() waits only for ongoing RCU
+       read-side critical sections to complete, not necessarily for
+       any that begin after synchronize_rcu() is invoked.
+
+       Of course, synchronize_rcu() does not necessarily return
+       -immediately- after the last pre-existing RCU read-side critical
+       section completes.  For one thing, there might well be scheduling
+       delays.  For another thing, many RCU implementations process
+       requests in batches in order to improve efficiencies, which can
+       further delay synchronize_rcu().
+
+       Since synchronize_rcu() is the API that must figure out when
+       readers are done, its implementation is key to RCU.  For RCU
+       to be useful in all but the most read-intensive situations,
+       synchronize_rcu()'s overhead must also be quite small.
+
+       The call_rcu() API is a callback form of synchronize_rcu(),
+       and is described in more detail in a later section.  Instead of
+       blocking, it registers a function and argument which are invoked
+       after all ongoing RCU read-side critical sections have completed.
+       This callback variant is particularly useful in situations where
+       it is illegal to block.
+
+rcu_assign_pointer()
+
+       typeof(p) rcu_assign_pointer(p, typeof(p) v);
+
+       Yes, rcu_assign_pointer() -is- implemented as a macro, though it
+       would be cool to be able to declare a function in this manner.
+       (Compiler experts will no doubt disagree.)
+
+       The updater uses this function to assign a new value to an
+       RCU-protected pointer, in order to safely communicate the change
+       in value from the updater to the reader.  This function returns
+       the new value, and also executes any memory-barrier instructions
+       required for a given CPU architecture.
+
+       Perhaps more important, it serves to document which pointers
+       are protected by RCU.  That said, rcu_assign_pointer() is most
+       frequently used indirectly, via the _rcu list-manipulation
+       primitives such as list_add_rcu().
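+
+       For example, an updater inserting into an RCU-protected list
+       (a sketch; "mylock", "mylist", and "p" are illustrative names):
+
+               spin_lock(&mylock);
+               list_add_rcu(&p->list, &mylist);
+               spin_unlock(&mylock);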
+
+rcu_dereference()
+
+       typeof(p) rcu_dereference(p);
+
+       Like rcu_assign_pointer(), rcu_dereference() must be implemented
+       as a macro.
+
+       The reader uses rcu_dereference() to fetch an RCU-protected
+       pointer, which returns a value that may then be safely
+       dereferenced.  Note that rcu_dereference() does not actually
+       dereference the pointer; instead, it protects the pointer for
+       later dereferencing.  It also executes any needed memory-barrier
+       instructions for a given CPU architecture.  Currently, only Alpha
+       needs memory barriers within rcu_dereference() -- on other CPUs,
+       it compiles to nothing, not even a compiler directive.
+
+       Common coding practice uses rcu_dereference() to copy an
+       RCU-protected pointer to a local variable, then dereferences
+       this local variable, for example as follows:
+
+               p = rcu_dereference(head.next);
+               return p->data;
+
+       However, in this case, one could just as easily combine these
+       into one statement:
+
+               return rcu_dereference(head.next)->data;
+
+       If you are going to be fetching multiple fields from the
+       RCU-protected structure, using the local variable is of
+       course preferred.  Repeated rcu_dereference() calls look
+       ugly and incur unnecessary overhead on Alpha CPUs.
+
+       Note that the value returned by rcu_dereference() is valid
+       only within the enclosing RCU read-side critical section.
+       For example, the following is -not- legal:
+
+               rcu_read_lock();
+               p = rcu_dereference(head.next);
+               rcu_read_unlock();
+               x = p->address;
+               rcu_read_lock();
+               y = p->data;
+               rcu_read_unlock();
+
+       Holding a reference from one RCU read-side critical section
+       to another is just as illegal as holding a reference from
+       one lock-based critical section to another!  Similarly,
+       using a reference outside of the critical section in which
+       it was acquired is just as illegal as doing so with normal
+       locking.
+
+       As with rcu_assign_pointer(), an important function of
+       rcu_dereference() is to document which pointers are protected
+       by RCU.  And, again like rcu_assign_pointer(), rcu_dereference()
+       is typically used indirectly, via the _rcu list-manipulation
+       primitives, such as list_for_each_entry_rcu().
+
+The following diagram shows how each API communicates among the
+reader, updater, and reclaimer.
+
+
+           rcu_assign_pointer()
+                                   +--------+
+           +---------------------->| reader |---------+
+           |                       +--------+         |
+           |                           |              |
+           |                           |              | Protect:
+           |                           |              | rcu_read_lock()
+           |                           |              | rcu_read_unlock()
+           |        rcu_dereference()  |              |
+       +---------+                     |              |
+       | updater |<--------------------+              |
+       +---------+                                    V
+           |                                    +-----------+
+           +----------------------------------->| reclaimer |
+                                                +-----------+
+             Defer:
+             synchronize_rcu() & call_rcu()
+
+
+The RCU infrastructure observes the time sequence of rcu_read_lock(),
+rcu_read_unlock(), synchronize_rcu(), and call_rcu() invocations in
+order to determine when (1) synchronize_rcu() invocations may return
+to their callers and (2) call_rcu() callbacks may be invoked.  Efficient
+implementations of the RCU infrastructure make heavy use of batching in
+order to amortize their overhead over many uses of the corresponding APIs.
+
+There are no fewer than three RCU mechanisms in the Linux kernel; the
+diagram above shows the first one, which is by far the most commonly used.
+The rcu_dereference() and rcu_assign_pointer() primitives are used for
+all three mechanisms, but different defer and protect primitives are
+used as follows:
+
+       Defer                   Protect
+
+a.     synchronize_rcu()       rcu_read_lock() / rcu_read_unlock()
+       call_rcu()
+
+b.     call_rcu_bh()           rcu_read_lock_bh() / rcu_read_unlock_bh()
+
+c.     synchronize_sched()     preempt_disable() / preempt_enable()
+                               local_irq_save() / local_irq_restore()
+                               hardirq enter / hardirq exit
+                               NMI enter / NMI exit
+
+These three mechanisms are used as follows:
+
+a.     RCU applied to normal data structures.
+
+b.     RCU applied to networking data structures that may be subjected
+       to remote denial-of-service attacks.
+
+c.     RCU applied to scheduler and interrupt/NMI-handler tasks.
+
+Again, most uses will be of (a).  The (b) and (c) cases are important
+for specialized uses, but are relatively uncommon.
+
+
+3.  WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+
+This section shows a simple use of the core RCU API to protect a
+global pointer to a dynamically allocated structure.  More typical
+uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+       };
+       DEFINE_SPINLOCK(foo_mutex);
+
+       struct foo *gbl_foo;
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses synchronize_rcu() to ensure that any readers that might
+        * have references to the old structure complete before freeing
+        * the old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               synchronize_rcu();
+               kfree(old_fp);
+       }
+
+       /*
+        * Return the value of field "a" of the current gbl_foo
+        * structure.  Use rcu_read_lock() and rcu_read_unlock()
+        * to ensure that the structure does not get deleted out
+        * from under us, and use rcu_dereference() to ensure that
+        * we see the initialized version of the structure (important
+        * for DEC Alpha and for people reading the code).
+        */
+       int foo_get_a(void)
+       {
+               int retval;
+
+               rcu_read_lock();
+               retval = rcu_dereference(gbl_foo)->a;
+               rcu_read_unlock();
+               return retval;
+       }
+
+So, to sum up:
+
+o      Use rcu_read_lock() and rcu_read_unlock() to guard RCU
+       read-side critical sections.
+
+o      Within an RCU read-side critical section, use rcu_dereference()
+       to dereference RCU-protected pointers.
+
+o      Use some solid scheme (such as locks or semaphores) to
+       keep concurrent updates from interfering with each other.
+
+o      Use rcu_assign_pointer() to update an RCU-protected pointer.
+       This primitive protects concurrent readers from the updater,
+       -not- concurrent updates from each other!  You therefore still
+       need to use locking (or something similar) to keep concurrent
+       rcu_assign_pointer() primitives from interfering with each other.
+
+o      Use synchronize_rcu() -after- removing a data element from an
+       RCU-protected data structure, but -before- reclaiming/freeing
+       the data element, in order to wait for the completion of all
+       RCU read-side critical sections that might be referencing that
+       data item.
+
+See checklist.txt for additional rules to follow when using RCU.
+
+
+4.  WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+
+In the example above, foo_update_a() blocks until a grace period elapses.
+This is quite simple, but in some cases one cannot afford to wait so
+long -- there might be other high-priority work to be done.
+
+In such cases, one uses call_rcu() rather than synchronize_rcu().
+The call_rcu() API is as follows:
+
+       void call_rcu(struct rcu_head * head,
+                     void (*func)(struct rcu_head *head));
+
+This function invokes func(head) after a grace period has elapsed.
+This invocation might happen from either softirq or process context,
+so the function is not permitted to block.  The foo struct needs to
+have an rcu_head structure added, perhaps as follows:
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+               struct rcu_head rcu;
+       };
+
+The foo_update_a() function might then be written as follows:
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses call_rcu() to ensure that any readers that might have
+        * references to the old structure complete before freeing the
+        * old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               call_rcu(&old_fp->rcu, foo_reclaim);
+       }
+
+The foo_reclaim() function might appear as follows:
+
+       void foo_reclaim(struct rcu_head *rp)
+       {
+               struct foo *fp = container_of(rp, struct foo, rcu);
+
+               kfree(fp);
+       }
+
+The container_of() primitive is a macro that, given a pointer into a
+struct, the type of the struct, and the pointed-to field within the
+struct, returns a pointer to the beginning of the struct.
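+
+A simplified definition, omitting the type checking that the real
+kernel macro performs, might look like:
+
+       #define container_of(ptr, type, member) \
+               ((type *)((char *)(ptr) - offsetof(type, member)))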
+
+The use of call_rcu() permits the caller of foo_update_a() to
+immediately regain control, without needing to worry further about the
+old version of the newly updated element.  It also clearly shows the
+RCU distinction between updater, namely foo_update_a(), and reclaimer,
+namely foo_reclaim().
+
+The summary of advice is the same as for the previous section, except
+that we are now using call_rcu() rather than synchronize_rcu():
+
+o      Use call_rcu() -after- removing a data element from an
+       RCU-protected data structure in order to register a callback
+       function that will be invoked after the completion of all RCU
+       read-side critical sections that might be referencing that
+       data item.
+
+Again, see checklist.txt for additional rules governing the use of RCU.
+
+
+5.  WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+
+One of the nice things about RCU is that it has extremely simple "toy"
+implementations that are a good first step towards understanding the
+production-quality implementations in the Linux kernel.  This section
+presents two such "toy" implementations of RCU, one that is implemented
+in terms of familiar locking primitives, and another that more closely
+resembles "classic" RCU.  Both are way too simple for real-world use,
+lacking both functionality and performance.  However, they are useful
+in getting a feel for how RCU works.  See kernel/rcupdate.c for a
+production-quality implementation, and see:
+
+       http://www.rdrop.com/users/paulmck/RCU
+
+for papers describing the Linux kernel RCU implementation.  The OLS'01
+and OLS'02 papers are a good introduction, and the dissertation provides
+more details on the current implementation.
+
+
+5A.  "TOY" IMPLEMENTATION #1: LOCKING
+
+This section presents a "toy" RCU implementation that is based on
+familiar locking primitives.  Its overhead makes it a non-starter for
+real-life use, as does its lack of scalability.  It is also unsuitable
+for realtime use, since it allows scheduling latency to "bleed" from
+one read-side critical section to another.
+
+However, it is probably the easiest implementation to relate to, so it
+is a good starting point.
+
+It is extremely simple:
+
+       static DEFINE_RWLOCK(rcu_gp_mutex);
+
+       void rcu_read_lock(void)
+       {
+               read_lock(&rcu_gp_mutex);
+       }
+
+       void rcu_read_unlock(void)
+       {
+               read_unlock(&rcu_gp_mutex);
+       }
+
+       void synchronize_rcu(void)
+       {
+               write_lock(&rcu_gp_mutex);
+               write_unlock(&rcu_gp_mutex);
+       }
+
+[You can ignore rcu_assign_pointer() and rcu_dereference() without
+missing much.  But here they are anyway.  And whatever you do, don't
+forget about them when submitting patches making use of RCU!]
+
+       #define rcu_assign_pointer(p, v)        ({ \
+                                                       smp_wmb(); \
+                                                       (p) = (v); \
+                                               })
+
+       #define rcu_dereference(p)     ({ \
+                                       typeof(p) _________p1 = p; \
+                                       smp_read_barrier_depends(); \
+                                       (_________p1); \
+                                       })
+
+
+The rcu_read_lock() and rcu_read_unlock() primitives read-acquire
+and release a global reader-writer lock.  The synchronize_rcu()
+primitive write-acquires this same lock, then immediately releases
+it.  This means that once synchronize_rcu() exits, all RCU read-side
+critical sections that were in progress before synchronize_rcu() was
+called are guaranteed to have completed -- there is no way that
+synchronize_rcu() would have been able to write-acquire the lock
+otherwise.
+
+It is possible to nest rcu_read_lock(), since reader-writer locks may
+be recursively acquired.  Note also that rcu_read_lock() is immune
+from deadlock (an important property of RCU).  The reason for this is
+that the only thing that can block rcu_read_lock() is a synchronize_rcu().
+But synchronize_rcu() does not acquire any locks while holding rcu_gp_mutex,
+so there can be no deadlock cycle.
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  How could this deadlock be avoided?
+
+
+5B.  "TOY" EXAMPLE #2: CLASSIC RCU
+
+This section presents a "toy" RCU implementation that is based on
+"classic RCU".  It is also short on performance (but only for updates) and
+on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+kernels.  The definitions of rcu_dereference() and rcu_assign_pointer()
+are the same as those shown in the preceding section, so they are omitted.
+
+       void rcu_read_lock(void) { }
+
+       void rcu_read_unlock(void) { }
+
+       void synchronize_rcu(void)
+       {
+               int cpu;
+
+               for_each_cpu(cpu)
+                       run_on(cpu);
+       }
+
+Note that rcu_read_lock() and rcu_read_unlock() do absolutely nothing.
+This is the great strength of classic RCU in a non-preemptive kernel:
+read-side overhead is precisely zero, at least on non-Alpha CPUs.
+And there is absolutely no way that rcu_read_lock() can possibly
+participate in a deadlock cycle!
+
+The implementation of synchronize_rcu() simply schedules itself on each
+CPU in turn.  The run_on() primitive can be implemented straightforwardly
+in terms of the sched_setaffinity() primitive.  Of course, a somewhat less
+"toy" implementation would restore the affinity upon completion rather
+than just leaving all tasks running on the last CPU, but when I said
+"toy", I meant -toy-!
+
+So how the heck is this supposed to work???
+
+Remember that it is illegal to block while in an RCU read-side critical
+section.  Therefore, if a given CPU executes a context switch, we know
+that it must have completed all preceding RCU read-side critical sections.
+Once -all- CPUs have executed a context switch, then -all- preceding
+RCU read-side critical sections will have completed.
+
+So, suppose that we remove a data item from its structure and then invoke
+synchronize_rcu().  Once synchronize_rcu() returns, we are guaranteed
+that there are no RCU read-side critical sections holding a reference
+to that data item, so we can safely reclaim it.
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+
+6.  ANALOGY WITH READER-WRITER LOCKING
+
+Although RCU can be used in many different ways, a very common use of
+RCU is analogous to reader-writer locking.  The following unified
+diff shows how closely related RCU and reader-writer locking can be.
+
+       @@ -13,15 +14,15 @@
+               struct list_head *lp;
+               struct el *p;
+
+       -       read_lock();
+       -       list_for_each_entry(p, head, lp) {
+       +       rcu_read_lock();
+       +       list_for_each_entry_rcu(p, head, lp) {
+                       if (p->key == key) {
+                               *result = p->data;
+       -                       read_unlock();
+       +                       rcu_read_unlock();
+                               return 1;
+                       }
+               }
+       -       read_unlock();
+       +       rcu_read_unlock();
+               return 0;
+        }
+
+       @@ -29,15 +30,16 @@
+        {
+               struct el *p;
+
+       -       write_lock(&listmutex);
+       +       spin_lock(&listmutex);
+               list_for_each_entry(p, head, lp) {
+                       if (p->key == key) {
+                               list_del(&p->list);
+       -                       write_unlock(&listmutex);
+       +                       spin_unlock(&listmutex);
+       +                       synchronize_rcu();
+                               kfree(p);
+                               return 1;
+                       }
+               }
+       -       write_unlock(&listmutex);
+       +       spin_unlock(&listmutex);
+               return 0;
+        }
+
+Or, for those who prefer a side-by-side listing:
+
+ 1 struct el {                          1 struct el {
+ 2   struct list_head list;             2   struct list_head list;
+ 3   long key;                          3   long key;
+ 4   spinlock_t mutex;                  4   spinlock_t mutex;
+ 5   int data;                          5   int data;
+ 6   /* Other data fields */            6   /* Other data fields */
+ 7 };                                   7 };
+ 8 spinlock_t listmutex;                8 spinlock_t listmutex;
+ 9 struct el head;                      9 struct el head;
+
+ 1 int search(long key, int *result)    1 int search(long key, int *result)
+ 2 {                                    2 {
+ 3   struct list_head *lp;              3   struct list_head *lp;
+ 4   struct el *p;                      4   struct el *p;
+ 5                                      5
+ 6   read_lock();                       6   rcu_read_lock();
+ 7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
+ 8     if (p->key == key) {             8     if (p->key == key) {
+ 9       *result = p->data;             9       *result = p->data;
+10       read_unlock();                10       rcu_read_unlock();
+11       return 1;                     11       return 1;
+12     }                               12     }
+13   }                                 13   }
+14   read_unlock();                    14   rcu_read_unlock();
+15   return 0;                         15   return 0;
+16 }                                   16 }
+
+ 1 int delete(long key)                 1 int delete(long key)
+ 2 {                                    2 {
+ 3   struct el *p;                      3   struct el *p;
+ 4                                      4
+ 5   write_lock(&listmutex);            5   spin_lock(&listmutex);
+ 6   list_for_each_entry(p, head, lp) { 6   list_for_each_entry(p, head, lp) {
+ 7     if (p->key == key) {             7     if (p->key == key) {
+ 8       list_del(&p->list);            8       list_del(&p->list);
+ 9       write_unlock(&listmutex);      9       spin_unlock(&listmutex);
+                                       10       synchronize_rcu();
+10       kfree(p);                     11       kfree(p);
+11       return 1;                     12       return 1;
+12     }                               13     }
+13   }                                 14   }
+14   write_unlock(&listmutex);         15   spin_unlock(&listmutex);
+15   return 0;                         16   return 0;
+16 }                                   17 }
+
+Either way, the differences are quite small.  Read-side locking moves
+to rcu_read_lock() and rcu_read_unlock(), update-side locking moves
+from a reader-writer lock to a simple spinlock, and a synchronize_rcu()
+precedes the kfree().
+
+However, there is one potential catch: the read-side and update-side
+critical sections can now run concurrently.  In many cases, this will
+not be a problem, but it is necessary to check carefully regardless.
+For example, if multiple independent list updates must be seen as
+a single atomic update, converting to RCU will require special care.
+
+Also, the presence of synchronize_rcu() means that the RCU version of
+delete() can now block.  If this is a problem, there is a callback-based
+mechanism that never blocks, namely call_rcu(), that can be used in
+place of synchronize_rcu().
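+
+For example (a sketch, assuming struct el gains a "struct rcu_head rcu"
+field), the tail of the RCU-based delete() becomes:
+
+               if (p->key == key) {
+                       list_del(&p->list);
+                       spin_unlock(&listmutex);
+                       call_rcu(&p->rcu, el_free);
+                       return 1;
+               }
+
+where el_free() is a hypothetical reclaimer:
+
+       static void el_free(struct rcu_head *head)
+       {
+               kfree(container_of(head, struct el, rcu));
+       }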
+
+
+7.  FULL LIST OF RCU APIs
+
+The RCU APIs are documented in docbook-format header comments in the
+Linux-kernel source code, but it helps to have a full list of the
+APIs, since there does not appear to be a way to categorize them
+in docbook.  Here is the list, by category.
+
+Markers for RCU read-side critical sections:
+
+       rcu_read_lock
+       rcu_read_unlock
+       rcu_read_lock_bh
+       rcu_read_unlock_bh
+
+RCU pointer/list traversal:
+
+       rcu_dereference
+       list_for_each_rcu               (to be deprecated in favor of
+                                        list_for_each_entry_rcu)
+       list_for_each_safe_rcu          (deprecated, not used)
+       list_for_each_entry_rcu
+       list_for_each_continue_rcu      (to be deprecated in favor of new
+                                        list_for_each_entry_continue_rcu)
+       hlist_for_each_rcu              (to be deprecated in favor of
+                                        hlist_for_each_entry_rcu)
+       hlist_for_each_entry_rcu
+
+RCU pointer update:
+
+       rcu_assign_pointer
+       list_add_rcu
+       list_add_tail_rcu
+       list_del_rcu
+       list_replace_rcu
+       hlist_del_rcu
+       hlist_add_head_rcu
+
+RCU grace period:
+
+       synchronize_kernel (deprecated)
+       synchronize_net
+       synchronize_sched
+       synchronize_rcu
+       call_rcu
+       call_rcu_bh
+
+See the comment headers in the source code (or the docbook generated
+from them) for more information.
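+
+As a brief, hypothetical illustration of how the pointer-update and
+pointer-traversal categories work together (struct foo, gp, publish(),
+and read_a() are invented for this sketch):
+
+        struct foo {
+                int a;
+        };
+        struct foo *gp;         /* RCU-protected global pointer */
+
+        void publish(struct foo *newp)
+        {
+                newp->a = 42;                   /* initialize first... */
+                rcu_assign_pointer(gp, newp);   /* ...then publish */
+        }
+
+        int read_a(void)
+        {
+                struct foo *p;
+                int ret = -1;
+
+                rcu_read_lock();
+                p = rcu_dereference(gp);        /* fetch with ordering */
+                if (p != NULL)
+                        ret = p->a;
+                rcu_read_unlock();
+                return ret;
+        }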
+
+
+8.  ANSWERS TO QUICK QUIZZES
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  [Referring to the lock-based "toy" RCU
+               algorithm.]
+
+Answer:                Consider the following sequence of events:
+
+               1.      CPU 0 acquires some unrelated lock, call it
+                       "problematic_lock".
+
+               2.      CPU 1 enters synchronize_rcu(), write-acquiring
+                       rcu_gp_mutex.
+
+               3.      CPU 0 enters rcu_read_lock(), but must wait
+                       because CPU 1 holds rcu_gp_mutex.
+
+               4.      CPU 1 is interrupted, and the irq handler
+                       attempts to acquire problematic_lock.
+
+               The system is now deadlocked.
+
+               One way to avoid this deadlock is to use an approach like
+               that of CONFIG_PREEMPT_RT, where all normal spinlocks
+               become blocking locks, and all irq handlers execute in
+               the context of special tasks.  In this case, in step 4
+               above, the irq handler would block, allowing CPU 1 to
+               release rcu_gp_mutex, avoiding the deadlock.
+
+               Even in the absence of deadlock, this RCU implementation
+               allows latency to "bleed" from readers to other
+               readers through synchronize_rcu().  To see this,
+               consider task A in an RCU read-side critical section
+               (thus read-holding rcu_gp_mutex), task B blocked
+               attempting to write-acquire rcu_gp_mutex, and
+               task C blocked in rcu_read_lock() attempting to
+               read-acquire rcu_gp_mutex.  Task A's RCU read-side
+               latency is holding up task C, albeit indirectly via
+               task B.
+
+               Realtime RCU implementations therefore use a counter-based
+               approach where tasks in RCU read-side critical sections
+               cannot be blocked by tasks executing synchronize_rcu().
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Answer:                Imagine a single-CPU system with a non-CONFIG_PREEMPT
+               kernel where a routing table is used by process-context
+               code, but can be updated by irq-context code (for example,
+               by an "ICMP REDIRECT" packet).  The usual way of handling
+               this would be to have the process-context code disable
+               interrupts while searching the routing table.  Use of
+               RCU allows such interrupt-disabling to be dispensed with.
+               Thus, without RCU, you pay the cost of disabling interrupts,
+               and with RCU you don't.
+
+               One can argue that the overhead of RCU in this
+               case is negative with respect to the single-CPU
+               interrupt-disabling approach.  Others might argue that
+               the overhead of RCU is merely zero, and that replacing
+               the positive overhead of the interrupt-disabling scheme
+               with the zero-overhead RCU scheme does not constitute
+               negative overhead.
+
+               In real life, of course, things are more complex.  But
+               even the theoretical possibility of negative overhead for
+               a synchronization primitive is a bit unexpected.  ;-)
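+
+               As a rough sketch of the comparison (the routing-table
+               lookup and all names in it are purely illustrative):
+
+                       unsigned long flags;
+                       struct route *rt;       /* hypothetical type */
+
+                       /* Without RCU: pay for disabling interrupts. */
+                       local_irq_save(flags);
+                       rt = lookup_route(table, daddr);
+                       /* ... use rt ... */
+                       local_irq_restore(flags);
+
+                       /*
+                        * With RCU, rcu_read_lock() costs nothing on a
+                        * single-CPU non-CONFIG_PREEMPT kernel.
+                        */
+                       rcu_read_lock();
+                       rt = lookup_route(table, daddr);
+                       /* ... use rt ... */
+                       rcu_read_unlock();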
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+Answer:                Just as PREEMPT_RT permits preemption of spinlock
+               critical sections, it permits preemption of RCU
+               read-side critical sections.  It also permits
+               spinlocks blocking while in RCU read-side critical
+               sections.
+
+               Why the apparent inconsistency?  Because it is
+               possible to use priority boosting to keep the RCU
+               grace periods short if need be (for example, if running
+               short of memory).  In contrast, if blocking waiting
+               for (say) network reception, there is no way to know
+               what should be boosted.  Especially given that the
+               process we need to boost might well be a human being
+               who just went out for a pizza or something.  And although
+               a computer-operated cattle prod might arouse serious
+               interest, it might also provoke serious objections.
+               Besides, how does the computer know what pizza parlor
+               the human being went to???
+
+
+ACKNOWLEDGEMENTS
+
+My thanks to the people who helped make this human-readable, including
+Jon Walpole, Josh Triplett, Serge Hallyn, and Suzanne Wood.
+
+
+For more information, see http://www.rdrop.com/users/paulmck/RCU.
index e2d1e76..6a82948 100644 (file)
@@ -36,7 +36,7 @@ cpufreq stats provides following statistics (explained in detail below).
 
 All the statistics will be from the time the stats driver has been inserted 
 to the time when a read of a particular statistic is done. Obviously, stats 
-driver will not have any information about the the frequcny transitions before
+driver will not have any information about the frequency transitions before
 the stats driver insertion.
 
 --------------------------------------------------------------------------------
index 47f4114..d17b7d2 100644 (file)
@@ -277,7 +277,7 @@ rewritten to the 'tasks' file of its cpuset.  This is done to avoid
 impacting the scheduler code in the kernel with a check for changes
 in a tasks processor placement.
 
-There is an exception to the above.  If hotplug funtionality is used
+There is an exception to the above.  If hotplug functionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
 then the kernel will automatically update the cpus_allowed of all
 tasks attached to CPUs in that cpuset to allow all CPUs.  When memory
index 166474c..16e9e63 100644 (file)
@@ -1,4 +1,4 @@
-Below is the orginal README file from the descore.shar package.
+Below is the original README file from the descore.shar package.
 ------------------------------------------------------------------------------
 
 des - fast & portable DES encryption & decryption.
index 5f95d4b..784e08c 100644 (file)
@@ -17,14 +17,6 @@ Who: Greg Kroah-Hartman <greg@kroah.com>
 
 ---------------------------
 
-What:  ACPI S4bios support
-When:  May 2005
-Why:   Noone uses it, and it probably does not work, anyway. swsusp is
-       faster, more reliable, and people are actually using it.
-Who:   Pavel Machek <pavel@suse.cz>
-
----------------------------
-
 What:  io_remap_page_range() (macro or function)
 When:  September 2005
 Why:   Replaced by io_remap_pfn_range() which allows more memory space
index 4ccdcc6..8ec32cc 100644 (file)
@@ -878,7 +878,7 @@ DVD_READ_STRUCT                     Read structure
 
        error returns:
          EINVAL        physical.layer_num exceeds number of layers
-         EIO           Recieved invalid response from drive
+         EIO           Received invalid response from drive
 
 
 
index 6739ab9..807a0c7 100644 (file)
@@ -30,7 +30,7 @@ other program after you have done the following:
    Read the file 'binfmt_misc.txt' in this directory to know
    more about the configuration process.
 
-3) Add the following enries to /etc/rc.local or similar script
+3) Add the following entries to /etc/rc.local or similar script
    to be run at system startup:
 
 # Insert BINFMT_MISC module into the kernel
index 24d0294..a55f0f9 100644 (file)
@@ -1241,7 +1241,7 @@ traffic while still maintaining carrier on.
 
        If running SNMP agents, the bonding driver should be loaded
 before any network drivers participating in a bond.  This requirement
-is due to the the interface index (ipAdEntIfIndex) being associated to
+is due to the interface index (ipAdEntIfIndex) being associated to
 the first interface found with a given IP address.  That is, there is
 only one ipAdEntIfIndex for each IP address.  For example, if eth0 and
 eth1 are slaves of bond0 and the driver for eth0 is loaded before the
@@ -1937,7 +1937,7 @@ switches currently available support 802.3ad.
        If not explicitly configured (with ifconfig or ip link), the
 MAC address of the bonding device is taken from its first slave
 device.  This MAC address is then passed to all following slaves and
-remains persistent (even if the the first slave is removed) until the
+remains persistent (even if the first slave is removed) until the
 bonding device is brought down or reconfigured.
 
        If you wish to change the MAC address, you can set it with
index aea20cd..c96897a 100644 (file)
@@ -355,7 +355,7 @@ REVISION HISTORY
                                There is no functional difference between the two packages         
 
 2.0.7   Aug 26, 1999           o  Merged X25API code into WANPIPE.
-                               o  Fixed a memeory leak for X25API
+                               o  Fixed a memory leak for X25API
                                o  Updated the X25API code for 2.2.X kernels.
                                o  Improved NEM handling.   
 
@@ -514,7 +514,7 @@ beta2-2.2.0 Jan 8 2001
                                o Patches for 2.4.0 kernel
                                o Patches for 2.2.18 kernel
                                o Minor updates to PPP and CHLDC drivers.
-                                 Note: No functinal difference. 
+                                 Note: No functional difference.
 
 beta3-2.2.9    Jan 10 2001
                                o I missed the 2.2.18 kernel patches in beta2-2.2.0
index 76d28d0..711210b 100644 (file)
@@ -84,7 +84,7 @@ Each entry consists of:
 
 Most drivers don't need to use the driver_data field.  Best practice
 for use of driver_data is to use it as an index into a static list of
-equivalant device types, not to use it as a pointer.
+equivalent device types, not to use it as a pointer.
 
 Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID}
 to have probe() called for every PCI device known to the system.
index 2bfe71b..e75d747 100644 (file)
@@ -134,7 +134,7 @@ pci_get_device_by_addr() will find the pci device associated
 with that address (if any).
 
 The default include/asm-ppc64/io.h macros readb(), inb(), insb(),
-etc. include a check to see if the the i/o read returned all-0xff's.
+etc. include a check to see if the i/o read returned all-0xff's.
 If so, these make a call to eeh_dn_check_failure(), which in turn
 asks the firmware if the all-ff's value is the sign of a true EEH
 error.  If it is not, processing continues as normal.  The grand
index e24fdea..e321a8e 100644 (file)
@@ -468,7 +468,7 @@ The hex_ascii view shows the data field in hex and ascii representation
 The raw view returns a bytestream as the debug areas are stored in memory.
 
 The sprintf view formats the debug entries in the same way as the sprintf
-function would do. The sprintf event/expection fuctions write to the 
+function would do. The sprintf event/exception functions write to the
 debug entry a pointer to the format string (size = sizeof(long)) 
 and for each vararg a long value. So e.g. for a debug entry with a format 
 string plus two varargs one would need to allocate a (3 * sizeof(long)) 
index 2814491..2ffb3ae 100644 (file)
    /proc/scsi/ibmmca/<host_no>. ibmmca_proc_info() provides this information.
    
    This table is quite informative for interested users. It shows the load
-   of commands on the subsystem and wether you are running the bypassed 
+   of commands on the subsystem and whether you are running the bypassed
    (software) or integrated (hardware) SCSI-command set (see below). The
    amount of accesses is shown. Read, write, modeselect is shown separately
    in order to help debugging problems with CD-ROMs or tapedrives.
index 5c49ba0..ebfcdf2 100644 (file)
@@ -1459,7 +1459,7 @@ devices where %i is sound card number from zero to seven.
 To auto-load an ALSA driver for OSS services, define the string
 'sound-slot-%i' where %i means the slot number for OSS, which
 corresponds to the card index of ALSA.  Usually, define this
-as the the same card module.
+as the same card module.
 
 An example configuration for a single emu10k1 card is like below:
 ----- /etc/modprobe.conf
index 136d817..baf17b3 100644 (file)
@@ -171,7 +171,7 @@ the header 'include/linux/sysrq.h', this will define everything else you need.
 Next, you must create a sysrq_key_op struct, and populate it with A) the key
 handler function you will use, B) a help_msg string, that will print when SysRQ
 prints help, and C) an action_msg string, that will print right before your
-handler is called. Your handler must conform to the protoype in 'sysrq.h'.
+handler is called. Your handler must conform to the prototype in 'sysrq.h'.
 
 After the sysrq_key_op is created, you can call the macro 
 register_sysrq_key(int key, struct sysrq_key_op *op_p) that is defined in
index 0c7b654..544430e 100644 (file)
   If you want to access files on the host machine from inside UML, you
   can treat it as a separate machine and either nfs mount directories
   from the host or copy files into the virtual machine with scp or rcp.
-  However, since UML is running on the the host, it can access those
+  However, since UML is running on the host, it can access those
   files just like any other process and make them available inside the
   virtual machine without needing to use the network.
 
index a938c3d..815f5c2 100644 (file)
@@ -20,7 +20,7 @@ License along with this program; if not, write to the Free
 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 MA 02111-1307 USA.
 
-This document and the the gadget serial driver itself are
+This document and the gadget serial driver itself are
 Copyright (C) 2004 by Al Borchers (alborchers@steinerpoint.com).
 
 If you have questions, problems, or suggestions for this driver
index 01425c2..52c94bd 100644 (file)
@@ -222,7 +222,7 @@ was introduced in 1991, is used in the DC10 old
 can generate: PAL , NTSC , SECAM
 
 The adv717x, should be able to produce PAL N. But you find nothing PAL N 
-specific in the the registers. Seem that you have to reuse a other standard
+specific in the registers. Seems that you have to reuse another standard
 to generate PAL N, maybe it would work if you use the PAL M settings. 
 
 ==========================
diff --git a/Kbuild b/Kbuild
index 1880e6f..7900391 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -4,7 +4,7 @@
 # 1) Generate asm-offsets.h
 
 #####
-# 1) Generate asm-offsets.h 
+# 1) Generate asm-offsets.h
 #
 
 offsets-file := include/asm-$(ARCH)/asm-offsets.h
@@ -22,6 +22,7 @@ sed-$(CONFIG_MIPS) := "/^@@@/s///p"
 
 quiet_cmd_offsets = GEN     $@
 define cmd_offsets
+       mkdir -p $(dir $@); \
        cat $< | \
        (set -e; \
         echo "#ifndef __ASM_OFFSETS_H__"; \
@@ -43,6 +44,6 @@ arch/$(ARCH)/kernel/asm-offsets.s: arch/$(ARCH)/kernel/asm-offsets.c FORCE
        $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
-$(srctree)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
+$(obj)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
        $(call cmd,offsets)
 
index 2402430..382298f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -334,7 +334,7 @@ KALLSYMS    = scripts/kallsyms
 PERL           = perl
 CHECK          = sparse
 
-CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__
+CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ $(CF)
 MODFLAGS       = -DMODULE
 CFLAGS_MODULE   = $(MODFLAGS)
 AFLAGS_MODULE   = $(MODFLAGS)
@@ -382,6 +382,9 @@ RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CV
 scripts_basic:
        $(Q)$(MAKE) $(build)=scripts/basic
 
+# To avoid any implicit rule to kick in, define an empty command.
+scripts/basic/%: scripts_basic ;
+
 .PHONY: outputmakefile
 # outputmakefile generate a Makefile to be placed in output directory, if
 # using a seperate output directory. This allows convinient use
@@ -444,9 +447,8 @@ ifeq ($(config-targets),1)
 include $(srctree)/arch/$(ARCH)/Makefile
 export KBUILD_DEFCONFIG
 
-config: scripts_basic outputmakefile FORCE
-       $(Q)$(MAKE) $(build)=scripts/kconfig $@
-%config: scripts_basic outputmakefile FORCE
+config %config: scripts_basic outputmakefile FORCE
+       $(Q)mkdir -p include/linux
        $(Q)$(MAKE) $(build)=scripts/kconfig $@
 
 else
@@ -641,8 +643,13 @@ quiet_cmd_vmlinux__ ?= LD      $@
 # Generate new vmlinux version
 quiet_cmd_vmlinux_version = GEN     .version
       cmd_vmlinux_version = set -e;                     \
-       . $(srctree)/scripts/mkversion > .tmp_version;  \
-       mv -f .tmp_version .version;                    \
+       if [ ! -r .version ]; then                      \
+         rm -f .version;                               \
+         echo 1 >.version;                             \
+       else                                            \
+         mv .version .old_version;                     \
+         expr 0$$(cat .old_version) + 1 >.version;     \
+       fi;                                             \
        $(MAKE) $(build)=init
 
 # Generate System.map
@@ -756,6 +763,7 @@ endif # ifdef CONFIG_KALLSYMS
 # vmlinux image - including updated kernel symbols
 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) FORCE
        $(call if_changed_rule,vmlinux__)
+       $(Q)rm -f .old_version
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
@@ -806,7 +814,7 @@ ifneq ($(KBUILD_MODULES),)
 endif
 
 prepare0: prepare prepare1 FORCE
-       $(Q)$(MAKE) $(build)=$(srctree)
+       $(Q)$(MAKE) $(build)=.
 
 # All the preparing..
 prepare-all: prepare0
@@ -848,7 +856,7 @@ include/asm:
 
 #      Split autoconf.h into include/linux/config/*
 
-include/config/MARKER: include/linux/autoconf.h
+include/config/MARKER: scripts/basic/split-include include/linux/autoconf.h
        @echo '  SPLIT   include/linux/autoconf.h -> include/config/*'
        @scripts/basic/split-include include/linux/autoconf.h include/config
        @touch $@
@@ -1053,6 +1061,7 @@ help:
        @echo  '  rpm             - Build a kernel as an RPM package'
        @echo  '  tags/TAGS       - Generate tags file for editors'
        @echo  '  cscope          - Generate cscope index'
+       @echo  '  kernelrelease   - Output the release version string'
        @echo  ''
        @echo  'Static analysers'
        @echo  '  buildcheck      - List dangling references to vmlinux discarded sections'
index 224c347..f9da827 100644 (file)
@@ -9,7 +9,7 @@ screen please read "Documentation/oops-tracing.txt" before posting your
 bug report. This explains what you should do with the "Oops" information
 to make it useful to the recipient.
 
-      Send the output the maintainer of the kernel area that seems to
+      Send the output to the maintainer of the kernel area that seems to
 be involved with the problem. Don't worry too much about getting the
 wrong person. If you are unsure send it to the person responsible for the
 code relevant to what you were doing. If it occurs repeatably try and
@@ -18,15 +18,15 @@ The list of maintainers is in the MAINTAINERS file in this directory.
 
       If it is a security bug, please copy the Security Contact listed
 in the MAINTAINERS file.  They can help coordinate bugfix and disclosure.
-See Documentation/SecurityBugs for more infomation.
+See Documentation/SecurityBugs for more information.
 
       If you are totally stumped as to whom to send the report, send it to
 linux-kernel@vger.kernel.org. (For more information on the linux-kernel
 mailing list see http://www.tux.org/lkml/).
 
-This is a suggested format for a bug report sent to the Linux kernel mailing 
-list. Having a standardized bug report form makes it easier  for you not to 
-overlook things, and easier for the developers to find the pieces of 
+This is a suggested format for a bug report sent to the Linux kernel mailing
+list. Having a standardized bug report form makes it easier for you not to
+overlook things, and easier for the developers to find the pieces of
 information they're really interested in. Don't feel you have to follow it.
 
       First run the ver_linux script included as scripts/ver_linux, which
@@ -35,9 +35,9 @@ the command "sh scripts/ver_linux".
 
 Use that information to fill in all fields of the bug report form, and
 post it to the mailing list with a subject of "PROBLEM: <one line
-summary from [1.]>" for easy identification by the developers    
+summary from [1.]>" for easy identification by the developers.
 
-[1.] One line summary of the problem:    
+[1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
 [4.] Kernel version (from /proc/version):
index fc5ef90..24ae9a3 100644 (file)
@@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_raw_spin_unlock);
-EXPORT_SYMBOL(debug_spin_lock);
-EXPORT_SYMBOL(debug_spin_trylock);
-#endif
-#ifdef CONFIG_DEBUG_RWLOCK
-EXPORT_SYMBOL(_raw_write_lock);
-EXPORT_SYMBOL(_raw_read_lock);
-#endif
 EXPORT_SYMBOL(cpu_present_mask);
 #endif /* CONFIG_SMP */
 
index 2b03418..0636116 100644 (file)
@@ -1154,8 +1154,7 @@ osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remai
 
        ticks = timeval_to_jiffies(&tmp);
 
-       current->state = TASK_INTERRUPTIBLE;
-       ticks = schedule_timeout(ticks);
+       ticks = schedule_timeout_interruptible(ticks);
 
        if (remain) {
                jiffies_to_timeval(ticks, &tmp);
index e211aa7..da0be34 100644 (file)
@@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
        preempt_enable();
 }
-\f
-#ifdef CONFIG_DEBUG_SPINLOCK
-void
-_raw_spin_unlock(spinlock_t * lock)
-{
-       mb();
-       lock->lock = 0;
-
-       lock->on_cpu = -1;
-       lock->previous = NULL;
-       lock->task = NULL;
-       lock->base_file = "none";
-       lock->line_no = 0;
-}
-
-void
-debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       long tmp;
-       long stuck;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-       stuck = 1L << 30;
- try_again:
-
-       /* Use sub-sections to put the actual loop at the end
-          of this object file's text section so as to perfect
-          branch prediction.  */
-       __asm__ __volatile__(
-       "1:     ldl_l   %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "       blbs    %0,2f\n"
-       "       or      %0,1,%0\n"
-       "       stl_c   %0,%1\n"
-       "       beq     %0,3f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "2:     ldl     %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "3:     blt     %2,4b\n"
-       "       blbs    %0,2b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
-       : "m" (lock->lock), "2" (stuck) : "memory");
-
-       if (stuck < 0) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock stuck in %s at %p(%d)"
-                      " owner %s at %p(%d) %s:%d\n",
-                      base_file, line_no,
-                      current->comm, inline_pc, cpu,
-                      lock->task->comm, lock->previous,
-                      lock->on_cpu, lock->base_file, lock->line_no);
-               stuck = 1L << 36;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->on_cpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->base_file = base_file;
-       lock->line_no = line_no;
-
-       if (printed) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
-                      base_file, line_no, current->comm, inline_pc,
-                      cpu, jiffies - started);
-       }
-}
-
-int
-debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       if ((ret = !test_and_set_bit(0, lock))) {
-               lock->on_cpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->base_file = base_file;
-               lock->line_no = line_no;
-       }
-       return ret;
-}
-#endif /* CONFIG_DEBUG_SPINLOCK */
-\f
-#ifdef CONFIG_DEBUG_RWLOCK
-void _raw_write_lock(rwlock_t * lock)
-{
-       long regx, regy;
-       int stuck_lock, stuck_reader;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-       stuck_reader = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       blt     %1,8f\n"
-       "       mov     1,%1\n"
-       "       stl_c   %1,%0\n"
-       "       beq     %1,6f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     blt     %3,4b   # debug\n"
-       "       subl    %3,1,%3 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
-       "8:     blt     %4,4b   # debug\n"
-       "       subl    %4,1,%4 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blt     %1,8b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
-         "=&r" (stuck_lock), "=&r" (stuck_reader)
-       : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-       if (stuck_reader < 0) {
-               printk(KERN_WARNING "write_lock stuck on readers at %p\n",
-                      inline_pc);
-               goto try_again;
-       }
-}
-
-void _raw_read_lock(rwlock_t * lock)
-{
-       long regx;
-       int stuck_lock;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0;"
-       "       blbs    %1,6f;"
-       "       subl    %1,2,%1;"
-       "       stl_c   %1,%0;"
-       "       beq     %1,6f;"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     ldl     %1,%0;"
-       "       blt     %2,4b   # debug\n"
-       "       subl    %2,1,%2 # debug\n"
-       "       blbs    %1,6b;"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
-       : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-}
-#endif /* CONFIG_DEBUG_RWLOCK */
index 0f2899b..11fff04 100644 (file)
@@ -326,8 +326,8 @@ config SMP
          processor machines. On a single processor machine, the kernel will
          run faster if you say N here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt>, <file:Documentation/i386/IO-APIC.txt>,
+         See also the <file:Documentation/smp.txt>,
+         <file:Documentation/i386/IO-APIC.txt>,
          <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
index 45a5709..5d3acff 100644 (file)
@@ -53,7 +53,7 @@ config DEBUG_LL
        bool "Kernel low-level debugging functions"
        depends on DEBUG_KERNEL
        help
-         Say Y here to include definitions of printascii, printchar, printhex
+         Say Y here to include definitions of printascii, printch, printhex
          in the kernel.  This is helpful if you are debugging code that
          executes before the console is initialized.
 
index 688a595..d3a04c2 100644 (file)
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(read_scoop_reg);
 EXPORT_SYMBOL(write_scoop_reg);
 
 #ifdef CONFIG_PM
-static int scoop_suspend(struct device *dev, uint32_t state, uint32_t level)
+static int scoop_suspend(struct device *dev, pm_message_t state, uint32_t level)
 {
        if (level == SUSPEND_POWER_DOWN) {
                struct scoop_dev *sdev = dev_get_drvdata(dev);
index db07ce4..949ec44 100644 (file)
@@ -10,7 +10,7 @@
  *  This file is included twice in entry-common.S
  */
 #ifndef NR_syscalls
-#define NR_syscalls 320
+#define NR_syscalls 328
 #else
 
 __syscall_start:
@@ -333,6 +333,9 @@ __syscall_start:
                .long   sys_inotify_init
                .long   sys_inotify_add_watch
                .long   sys_inotify_rm_watch
+               .long   sys_mbind_wrapper
+/* 320 */      .long   sys_get_mempolicy
+               .long   sys_set_mempolicy
 __syscall_end:
 
                .rept   NR_syscalls - (__syscall_end - __syscall_start) / 4
index 6281d48..db302c6 100644 (file)
@@ -269,6 +269,10 @@ sys_arm_fadvise64_64_wrapper:
                str     r5, [sp, #4]            @ push r5 to stack
                b       sys_arm_fadvise64_64
 
+sys_mbind_wrapper:
+               str     r5, [sp, #4]
+               b       sys_mbind
+
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
index 29185ac..07b5dd4 100644 (file)
@@ -131,27 +131,12 @@ static struct platform_device corgits_device = {
 /*
  * MMC/SD Device
  *
- * The card detect interrupt isn't debounced so we delay it by HZ/4
+ * The card detect interrupt isn't debounced so we delay it by 250ms
  * to give the card a chance to fully insert/eject.
  */
-static struct mmc_detect {
-       struct timer_list detect_timer;
-       void *devid;
-} mmc_detect;
+static struct pxamci_platform_data corgi_mci_platform_data;
 
-static void mmc_detect_callback(unsigned long data)
-{
-       mmc_detect_change(mmc_detect.devid);
-}
-
-static irqreturn_t corgi_mmc_detect_int(int irq, void *devid, struct pt_regs *regs)
-{
-       mmc_detect.devid=devid;
-       mod_timer(&mmc_detect.detect_timer, jiffies + HZ/4);
-       return IRQ_HANDLED;
-}
-
-static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(int, void *, struct pt_regs *), void *data)
+static int corgi_mci_init(struct device *dev, irqreturn_t (*corgi_detect_int)(int, void *, struct pt_regs *), void *data)
 {
        int err;
 
@@ -161,11 +146,9 @@ static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(i
        pxa_gpio_mode(CORGI_GPIO_nSD_DETECT | GPIO_IN);
        pxa_gpio_mode(CORGI_GPIO_SD_PWR | GPIO_OUT);
 
-       init_timer(&mmc_detect.detect_timer);
-       mmc_detect.detect_timer.function = mmc_detect_callback;
-       mmc_detect.detect_timer.data = (unsigned long) &mmc_detect;
+       corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 
-       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_mmc_detect_int, SA_INTERRUPT,
+       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, SA_INTERRUPT,
                             "MMC card detect", data);
        if (err) {
                printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
@@ -198,7 +181,6 @@ static int corgi_mci_get_ro(struct device *dev)
 static void corgi_mci_exit(struct device *dev, void *data)
 {
        free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
-       del_timer(&mmc_detect.detect_timer);
 }
 
 static struct pxamci_platform_data corgi_mci_platform_data = {
index 44d886c..7c74fe0 100644 (file)
@@ -304,12 +304,6 @@ ret_point:
        call    restore_processor_state
        ret
 
-ENTRY(do_suspend_lowlevel_s4bios)
-       call save_processor_state
-       call save_registers
-       call acpi_enter_sleep_state_s4bios
-       ret
-
 ALIGN
 # saved registers
 saved_gdt:     .long   0,0
index 46ce9b2..9ad43be 100644 (file)
@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
        char *v = c->x86_vendor_id;
        int i;
index 1efdc76..35d3ce2 100644 (file)
@@ -573,8 +573,7 @@ static int balanced_irq(void *unused)
        }
 
        for ( ; ; ) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               time_remaining = schedule_timeout(time_remaining);
+               time_remaining = schedule_timeout_interruptible(time_remaining);
                try_to_freeze();
                if (time_after(jiffies,
                                prev_balance_time+balanced_irq_interval)) {
index 5e4893d..c70cd2a 100644 (file)
@@ -1330,8 +1330,7 @@ void __cpu_die(unsigned int cpu)
                        printk ("CPU %d is now offline\n", cpu);
                        return;
                }
-               current->state = TASK_UNINTERRUPTIBLE;
-               schedule_timeout(HZ/10);
+               msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
index 13b9c62..4710195 100644 (file)
@@ -144,12 +144,7 @@ SECTIONS
        *(.exitcall.exit)
        }
 
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 4ebbf39..8d48420 100644 (file)
@@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms,
        unw_init_from_interruption(&info, current, pt, sw);
        ia64_do_show_stack(&info, NULL);
 
-#ifdef CONFIG_SMP
-       /* read_trylock() would be handy... */
-       if (!tasklist_lock.write_lock)
-               read_lock(&tasklist_lock);
-#endif
-       {
+       if (read_trylock(&tasklist_lock)) {
                struct task_struct *g, *t;
                do_each_thread (g, t) {
                        if (t == current)
@@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms,
                        show_stack(t, NULL);
                } while_each_thread (g, t);
        }
-#ifdef CONFIG_SMP
-       if (!tasklist_lock.write_lock)
-               read_unlock(&tasklist_lock);
-#endif
 
        printk("\nINIT dump complete.  Please reboot now.\n");
        while (1);                      /* hang city if no debugger */
index 7622d4e..1ef3987 100644 (file)
@@ -242,8 +242,8 @@ config SMP
          Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
          Management" code will be disabled if you say Y here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt> and the SMP-HOWTO available at
+         See also the <file:Documentation/smp.txt>,
+         and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 48b187f..a4576ac 100644 (file)
@@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
 {
        spinlock_t *ipilock;
-       unsigned long flags = 0;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
@@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
+       spin_lock(ipilock);
        __asm__ __volatile__ (
-               ";; LOCK ipi_lock[i]            \n\t"
+               ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
-               "mvfc   %1, psw                 \n\t"
-               "clrpsw #0x40 -> nop            \n\t"
-               DCACHE_CLEAR("r4", "r5", "%2")
-               "lock   r4, @%2                 \n\t"
-               "addi   r4, #-1                 \n\t"
-               "unlock r4, @%2                 \n\t"
-               "mvtc   %1, psw                 \n\t"
-               "bnez   r4, 2f                  \n\t"
-               LOCK_SECTION_START(".balign 4 \n\t")
-               ".fillinsn                      \n"
-               "2:                             \n\t"
-               "ld     r4, @%2                 \n\t"
-               "blez   r4, 2b                  \n\t"
+               "ld     %0, @%1                 \n\t"
+               "and    %0, %4                  \n\t"
+               "beqz   %0, 2f                  \n\t"
+               "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
-               LOCK_SECTION_END
-               ";; CHECK IPICRi == 0           \n\t"
-               ".fillinsn                      \n"
-               "3:                             \n\t"
-               "ld     %0, @%3                 \n\t"
-               "and    %0, %6                  \n\t"
-               "beqz   %0, 4f                  \n\t"
-               "bnez   %5, 5f                  \n\t"
-               "bra    3b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
-               "4:                             \n\t"
-               "st     %4, @%3                 \n\t"
-               ";; UNLOCK ipi_lock[i]          \n\t"
+               "2:                             \n\t"
+               "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
-               "5:                             \n\t"
-               "ldi    r4, #1                  \n\t"
-               "st     r4, @%2                 \n\t"
+               "3:                             \n\t"
                : "=&r"(ipicr_val)
-               : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr),
-                 "r"(mask), "r"(try), "r"(my_physid_mask)
-               : "memory", "r4"
-#ifdef CONFIG_CHIP_M32700_TS1
-               , "r5"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+               : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
+               : "memory"
        );
+       spin_unlock(ipilock);
 
        return ipicr_val;
 }
index 4c114ae..eff8932 100644 (file)
@@ -440,18 +440,6 @@ struct irix5_siginfo {
        } stuff;
 };
 
-static inline unsigned long timespectojiffies(struct timespec *value)
-{
-       unsigned long sec = (unsigned) value->tv_sec;
-       long nsec = value->tv_nsec;
-
-       if (sec > (LONG_MAX / HZ))
-               return LONG_MAX;
-       nsec += 1000000000L / HZ - 1;
-       nsec /= 1000000000L / HZ;
-       return HZ * sec + nsec;
-}
-
 asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                                struct timespec *tp)
 {
@@ -489,14 +477,13 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                        error = -EINVAL;
                        goto out;
                }
-               expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec);
+               expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec);
        }
 
        while(1) {
                long tmp = 0;
 
-               current->state = TASK_INTERRUPTIBLE;
-               expire = schedule_timeout(expire);
+               expire = schedule_timeout_interruptible(expire);
 
                for (i=0; i<=4; i++)
                        tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
index b465954..4de1556 100644 (file)
@@ -1032,8 +1032,7 @@ bad:
 
 asmlinkage int irix_sginap(int ticks)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(ticks);
+       schedule_timeout_interruptible(ticks);
        return 0;
 }
 
index e44e957..fd82c84 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index 7bf7056..5f2e690 100644 (file)
@@ -5,5 +5,3 @@
 lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
 
 obj-y  := iomap.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
index 2de182f..90f400b 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-       [0 ... (ATOMIC_HASH_SIZE-1)]  = SPIN_LOCK_UNLOCKED
+raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c
deleted file mode 100644 (file)
index 1b33fe6..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* 
- *    Debugging versions of SMP locking primitives.
- *
- *    Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org>
- *
- *    Some code stollen from alpha & sparc64 ;)
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *    We use pdc_printf() throughout the file for all output messages, to avoid
- *    losing messages because of disabled interrupts. Since we're using these
- *    messages for debugging purposes, it makes sense not to send them to the
- *    linux console.
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>     /* in_interrupt() */
-#include <asm/system.h>
-#include <asm/hardirq.h>       /* in_interrupt() */
-#include <asm/pdc.h>
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       volatile unsigned int *a;
-       long stuck = INIT_STUCK;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-try_again:
-
-       /* Do the actual locking */
-       /* <T-Bone> ggg: we can't get stuck on the outter loop?
-        * <ggg> T-Bone: We can hit the outer loop
-        *      alot if multiple CPUs are constantly racing for a lock
-        *      and the backplane is NOT fair about which CPU sees
-        *      the update first. But it won't hang since every failed
-        *      attempt will drop us back into the inner loop and
-        *      decrement `stuck'.
-        * <ggg> K-class and some of the others are NOT fair in the HW
-        *      implementation so we could see false positives.
-        *      But fixing the lock contention is easier than
-        *      fixing the HW to be fair.
-        * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
-        *      spin until the value of the lock changes, or we time out.
-        */
-       mb();
-       a = __ldcw_align(lock);
-       while (stuck && (__ldcw(a) == 0))
-               while ((*a == 0) && --stuck);
-       mb();
-
-       if (unlikely(stuck <= 0)) {
-               pdc_printf(
-                       "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
-                       " owned by %s:%d in %s at %p(%d)\n",
-                       base_file, line_no, lock->module, lock,
-                       current->comm, inline_pc, cpu,
-                       lock->bfile, lock->bline, lock->task->comm,
-                       lock->previous, lock->oncpu);
-               stuck = INIT_STUCK;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->oncpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->bfile = (char *)base_file;
-       lock->bline = line_no;
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       base_file, line_no, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       CHECK_LOCK(lock);
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       if (unlikely((*a != 0) && lock->babble)) {
-               lock->babble--;
-               pdc_printf(
-                       "%s:%d: spin_unlock(%s:%p) not locked\n",
-                       base_file, line_no, lock->module, lock);
-       }
-       *a = 1; 
-       mb();
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       ret = (__ldcw(a) != 0);
-       mb();
-       if (ret) {
-               lock->oncpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->bfile = (char *)base_file;
-               lock->bline = line_no;
-       }
-       return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- * o writer (wants to modify data) attempts to acquire the rwlock
- * o He gets the write lock.
- * o Interupts are still enabled, we take an interrupt with the
- *   write still holding the lock.
- * o interrupt handler tries to acquire the rwlock for read.
- * o deadlock since the writer can't release it at this point.
- * 
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock.  But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       long stuck = INIT_STUCK;
-       int printed = 0;
-       int cpu = smp_processor_id();
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-retry:
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               
-               stuck--;
-               if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on writer"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               else if (unlikely(stuck <= 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on reader"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               
-               while(rw->counter != 0);
-
-               goto retry;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       bfile, bline, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       int cpu = smp_processor_id();
-#endif
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               return 0;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-#if 0
-       pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n",
-                  bfile, bline, current->comm, inline_pc, cpu);
-#endif
-       return 1;
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int cpu = smp_processor_id();
-#endif
-       unsigned long flags;
-
-       local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
-
-       rw->counter++;
-#if 0
-       pdc_printf(
-               "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
-               bfile, bline, current->comm, inline_pc,
-               cpu, jiffies - started);
-#endif
-       _raw_spin_unlock(&rw->lock);
-       local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
index e3f1ce3..347ea28 100644 (file)
@@ -265,6 +265,15 @@ config PPC601_SYNC_FIX
 
          If in doubt, say Y here.
 
+config HOTPLUG_CPU
+       bool "Support for enabling/disabling CPUs"
+       depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC
+       ---help---
+         Say Y here to be able to disable and re-enable individual
+         CPUs at runtime on SMP machines.
+
+         Say N if you are unsure.
+
 source arch/ppc/platforms/4xx/Kconfig
 source arch/ppc/platforms/85xx/Kconfig
 
index 4b3fe39..6dd7b50 100644 (file)
@@ -21,13 +21,14 @@ CC          := $(CC) -m32
 endif
 
 LDFLAGS_vmlinux        := -Ttext $(KERNELLOAD) -Bstatic
-CPPFLAGS       += -Iarch/$(ARCH) -Iinclude3
+# The -Iarch/$(ARCH)/include is temporary while we are merging
+CPPFLAGS       += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
 AFLAGS         += -Iarch/$(ARCH)
 CFLAGS         += -Iarch/$(ARCH) -msoft-float -pipe \
                -ffixed-r2 -mmultiple
 CPP            = $(CC) -E $(CFLAGS)
 # Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE    += -Iinclude3
+LINUXINCLUDE    += -Iarch/$(ARCH)/include
 
 CHECKFLAGS     += -D__powerpc__
 
@@ -103,15 +104,16 @@ endef
 
 archclean:
        $(Q)$(MAKE) $(clean)=arch/ppc/boot
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 prepare: checkbin
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
index 1f37b7e..ba39643 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 304589a..7e4fbb6 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/page.h>
 #include <asm/ppc_asm.h>
 #include <asm/cputable.h>
-#include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
index e0c631c..b566d98 100644 (file)
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(__dma_sync);
  * __dma_sync_page() implementation for systems using highmem.
  * In this case, each page of a buffer must be kmapped/kunmapped
  * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atmomic()/kunmap_atomic() are used.
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
  *
  * Note: yes, it is possible and correct to have a buffer extend
  * beyond the first page.
index 55daf12..1960fb8 100644 (file)
@@ -1023,23 +1023,21 @@ __secondary_start_gemini:
         andc    r4,r4,r3
         mtspr   SPRN_HID0,r4
         sync
-        bl      gemini_prom_init
         b       __secondary_start
 #endif /* CONFIG_GEMINI */
-       .globl  __secondary_start_psurge
-__secondary_start_psurge:
-       li      r24,1                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge2
-__secondary_start_psurge2:
-       li      r24,2                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge3
-__secondary_start_psurge3:
-       li      r24,3                   /* cpu # */
-       b       __secondary_start_psurge99
-__secondary_start_psurge99:
-       /* we come in here with IR=0 and DR=1, and DBAT 0
+
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
+       /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
           set to map the 0xf0000000 - 0xffffffff region */
        mfmsr   r0
        rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
index 53547b6..fba29c8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
+#include <linux/cpu.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -35,6 +36,7 @@
 void default_idle(void)
 {
        void (*powersave)(void);
+       int cpu = smp_processor_id();
 
        powersave = ppc_md.power_save;
 
@@ -44,7 +46,7 @@ void default_idle(void)
 #ifdef CONFIG_SMP
                else {
                        set_thread_flag(TIF_POLLING_NRFLAG);
-                       while (!need_resched())
+                       while (!need_resched() && !cpu_is_offline(cpu))
                                barrier();
                        clear_thread_flag(TIF_POLLING_NRFLAG);
                }
@@ -52,6 +54,8 @@ void default_idle(void)
        }
        if (need_resched())
                schedule();
+       if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+               cpu_die();
 }
 
 /*
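
The two idle-loop hunks above make a polling CPU notice that it has been marked offline and call cpu_die(). A standalone model of the resulting control flow; the flags here are illustrative stand-ins for need_resched(), cpu_is_offline() and cpu_die(), not the kernel interfaces themselves:

#include <stdio.h>

static volatile int resched_pending;
static volatile int offline;

static void idle_loop(void)
{
        for (;;) {
                while (!resched_pending && !offline)
                        ;                       /* the barrier() polling loop */
                if (resched_pending) {
                        resched_pending = 0;    /* stands in for schedule() */
                        puts("schedule()");
                }
                if (offline) {
                        puts("cpu_die()");      /* never returns in the kernel */
                        return;
                }
        }
}

int main(void)
{
        offline = 1;            /* simulate a hot-unplug request */
        idle_loop();
        return 0;
}
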
index e70b587..726fe7c 100644 (file)
@@ -45,6 +45,7 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
+static struct task_struct *idle_tasks[NR_CPUS];
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
@@ -286,7 +287,8 @@ static void __devinit smp_store_cpu_info(int id)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int num_cpus, i;
+       int num_cpus, i, cpu;
+       struct task_struct *p;
 
        /* Fixup boot cpu */
         smp_store_cpu_info(smp_processor_id());
@@ -308,6 +310,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
        if (smp_ops->space_timers)
                smp_ops->space_timers(num_cpus);
+
+       for_each_cpu(cpu) {
+               if (cpu == smp_processor_id())
+                       continue;
+               /* create a process for the processor */
+               p = fork_idle(cpu);
+               if (IS_ERR(p))
+                       panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+               p->thread_info->cpu = cpu;
+               idle_tasks[cpu] = p;
+       }
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -334,12 +347,17 @@ int __devinit start_secondary(void *unused)
        set_dec(tb_ticks_per_jiffy);
        cpu_callin_map[cpu] = 1;
 
-       printk("CPU %i done callin...\n", cpu);
+       printk("CPU %d done callin...\n", cpu);
        smp_ops->setup_cpu(cpu);
-       printk("CPU %i done setup...\n", cpu);
-       local_irq_enable();
+       printk("CPU %d done setup...\n", cpu);
        smp_ops->take_timebase();
-       printk("CPU %i done timebase take...\n", cpu);
+       printk("CPU %d done timebase take...\n", cpu);
+
+       spin_lock(&call_lock);
+       cpu_set(cpu, cpu_online_map);
+       spin_unlock(&call_lock);
+
+       local_irq_enable();
 
        cpu_idle();
        return 0;
@@ -347,17 +365,11 @@ int __devinit start_secondary(void *unused)
 
 int __cpu_up(unsigned int cpu)
 {
-       struct task_struct *p;
        char buf[32];
        int c;
 
-       /* create a process for the processor */
-       /* only regs.msr is actually used, and 0 is OK for it */
-       p = fork_idle(cpu);
-       if (IS_ERR(p))
-               panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-       secondary_ti = p->thread_info;
-       p->thread_info->cpu = cpu;
+       secondary_ti = idle_tasks[cpu]->thread_info;
+       mb();
 
        /*
         * There was a cache flush loop here to flush the cache
@@ -389,7 +401,11 @@ int __cpu_up(unsigned int cpu)
        printk("Processor %d found.\n", cpu);
 
        smp_ops->give_timebase();
-       cpu_set(cpu, cpu_online_map);
+
+       /* Wait until cpu puts itself in the online map */
+       while (!cpu_online(cpu))
+               cpu_relax();
+
        return 0;
 }
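
The rework above pre-forks every idle task in smp_prepare_cpus() (fork_idle() may sleep, so it cannot run late on the bringup path) and inverts the online handshake: the secondary sets its own bit in cpu_online_map and __cpu_up() merely spins until the bit appears. A standalone pthread model of that handshake, with cpu_set()/cpu_online() reduced to a plain bitmask for illustration:

#include <pthread.h>
#include <stdio.h>

static volatile unsigned long online_map;       /* toy cpu_online_map */

static void *secondary(void *arg)
{
        int cpu = *(int *)arg;

        /* ... per-cpu setup and timebase sync would happen here ... */
        __sync_fetch_and_or(&online_map, 1UL << cpu);   /* cpu_set() */
        return NULL;
}

int main(void)
{
        pthread_t t;
        int cpu = 1;

        pthread_create(&t, NULL, secondary, &cpu);
        while (!(online_map & (1UL << cpu)))
                ;                       /* the cpu_relax() spin in __cpu_up() */
        printf("CPU %d online\n", cpu);
        pthread_join(t, NULL);
        return 0;
}
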
 
index 8356d54..961ede8 100644 (file)
@@ -118,6 +118,28 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
+
+       /*
+        * Init gets no signals that it doesn't have a handler for.
+        * That's all very well, but if it has caused a synchronous
+        * exception and we ignore the resulting signal, it will just
+        * generate the same exception over and over again and we get
+        * nowhere.  Better to kill it and let the kernel panic.
+        */
+       if (current->pid == 1) {
+               __sighandler_t handler;
+
+               spin_lock_irq(&current->sighand->siglock);
+               handler = current->sighand->action[signr-1].sa.sa_handler;
+               spin_unlock_irq(&current->sighand->siglock);
+               if (handler == SIG_DFL) {
+                       /* init has generated a synchronous exception
+                          and it doesn't have a handler for the signal */
+                       printk(KERN_CRIT "init has generated signal %d "
+                              "but has no handler for it\n", signr);
+                       do_exit(signr);
+               }
+       }
 }
 
 /*
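
The check added above asks whether init still has the default disposition for the signal it just raised. A userspace analogue of that query, using sigaction() with a NULL new action; the kernel reads current->sighand directly instead:

#include <signal.h>
#include <stdio.h>

static int handler_is_default(int signr)
{
        struct sigaction sa;

        if (sigaction(signr, NULL, &sa) != 0)   /* query only, change nothing */
                return -1;
        return sa.sa_handler == SIG_DFL;
}

int main(void)
{
        printf("SIGSEGV at default? %d\n", handler_is_default(SIGSEGV));
        signal(SIGSEGV, SIG_IGN);
        printf("after SIG_IGN: %d\n", handler_is_default(SIGSEGV));
        return 0;
}
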
index 1c380e6..f1e1fb4 100644 (file)
@@ -4,6 +4,5 @@
 
 obj-y                  := checksum.o string.o strcase.o dec_and_lock.o div64.o
 
-obj-$(CONFIG_SMP)      += locks.o
 obj-$(CONFIG_8xx)      += rheap.o
 obj-$(CONFIG_CPM2)     += rheap.o
index 4ee8880..b18f0d9 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
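
The #ifndef guard can go because this file is now built only when the architecture genuinely supplies its own atomic_dec_and_lock(). The contract: decrement the counter, and return 1 with the lock held exactly when the count hit zero. A standalone model of the cmpxchg fast path the ppc code implements with lwarx/stwcx.; the spinlock is reduced to a test-and-set flag and all names are illustrative:

#include <stdio.h>

static int counter = 2;         /* toy atomic_t */
static char lock;               /* toy spinlock: 0 = free */

static int dec_and_lock(void)
{
        int old, new;

        for (;;) {
                old = __atomic_load_n(&counter, __ATOMIC_RELAXED);
                new = old - 1;
                if (new == 0)
                        break;          /* may hit zero: fall back to the lock */
                if (__atomic_compare_exchange_n(&counter, &old, new, 0,
                                __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
                        return 0;       /* decremented without locking */
        }
        while (__atomic_test_and_set(&lock, __ATOMIC_ACQUIRE))
                ;                       /* spin_lock() */
        if (__atomic_sub_fetch(&counter, 1, __ATOMIC_ACQ_REL) == 0)
                return 1;               /* hit zero: caller keeps the lock */
        __atomic_clear(&lock, __ATOMIC_RELEASE);
        return 0;
}

int main(void)
{
        int a = dec_and_lock();         /* counter 2 -> 1: lock-free */
        int b = dec_and_lock();         /* counter 1 -> 0: returns holding lock */

        printf("%d %d\n", a, b);        /* prints: 0 1 */
        return 0;
}
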
index 57d9930..ee5e9f2 100644 (file)
@@ -278,11 +278,7 @@ bad_area:
 
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = code;
-               info.si_addr = (void __user *) address;
-               force_sig_info(SIGSEGV, &info, current);
+               _exception(SIGSEGV, regs, code, address);
                return 0;
        }
 
index 8d67adc..88419c7 100644 (file)
@@ -161,6 +161,8 @@ _GLOBAL(low_sleep_handler)
        addi r3,r3,sleep_storage@l
        stw r5,0(r3)
 
+       .globl  low_cpu_die
+low_cpu_die:
        /* Flush & disable all caches */
        bl      flush_disable_caches
 
index 8e049da..794a239 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
+#include <linux/cpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
@@ -55,9 +56,7 @@
  * Powersurge (old powermac SMP) support.
  */
 
-extern void __secondary_start_psurge(void);
-extern void __secondary_start_psurge2(void);   /* Temporary horrible hack */
-extern void __secondary_start_psurge3(void);   /* Temporary horrible hack */
+extern void __secondary_start_pmac_0(void);
 
 /* Addresses for powersurge registers */
 #define HAMMERHEAD_BASE                0xf8000000
@@ -119,7 +118,7 @@ static volatile int sec_tb_reset = 0;
 static unsigned int pri_tb_hi, pri_tb_lo;
 static unsigned int pri_tb_stamp;
 
-static void __init core99_init_caches(int cpu)
+static void __devinit core99_init_caches(int cpu)
 {
        if (!cpu_has_feature(CPU_FTR_L2CR))
                return;
@@ -346,7 +345,7 @@ static int __init smp_psurge_probe(void)
 
 static void __init smp_psurge_kick_cpu(int nr)
 {
-       void (*start)(void) = __secondary_start_psurge;
+       unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
        unsigned long a;
 
        /* may need to flush here if secondary bats aren't setup */
@@ -356,17 +355,7 @@ static void __init smp_psurge_kick_cpu(int nr)
 
        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
 
-       /* setup entry point of secondary processor */
-       switch (nr) {
-       case 2:
-               start = __secondary_start_psurge2;
-               break;
-       case 3:
-               start = __secondary_start_psurge3;
-               break;
-       }
-
-       out_be32(psurge_start, __pa(start));
+       out_be32(psurge_start, start);
        mb();
 
        psurge_set_ipi(nr);
@@ -500,14 +489,14 @@ static int __init smp_core99_probe(void)
        return ncpus;
 }
 
-static void __init smp_core99_kick_cpu(int nr)
+static void __devinit smp_core99_kick_cpu(int nr)
 {
        unsigned long save_vector, new_vector;
        unsigned long flags;
 
        volatile unsigned long *vector
                 = ((volatile unsigned long *)(KERNELBASE+0x100));
-       if (nr < 1 || nr > 3)
+       if (nr < 0 || nr > 3)
                return;
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
 
@@ -518,19 +507,9 @@ static void __init smp_core99_kick_cpu(int nr)
        save_vector = *vector;
 
        /* Setup fake reset vector that does    
-        *   b __secondary_start_psurge - KERNELBASE
+        *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
         */
-       switch(nr) {
-               case 1:
-                       new_vector = (unsigned long)__secondary_start_psurge;
-                       break;
-               case 2:
-                       new_vector = (unsigned long)__secondary_start_psurge2;
-                       break;
-               case 3:
-                       new_vector = (unsigned long)__secondary_start_psurge3;
-                       break;
-       }
+       new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
        *vector = 0x48000002 + new_vector - KERNELBASE;
 
        /* flush data cache and inval instruction cache */
@@ -554,7 +533,7 @@ static void __init smp_core99_kick_cpu(int nr)
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
 }
 
-static void __init smp_core99_setup_cpu(int cpu_nr)
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
 {
        /* Setup L2/L3 */
        if (cpu_nr != 0)
@@ -668,3 +647,47 @@ struct smp_ops_t core99_smp_ops __pmacdata = {
        .give_timebase  = smp_core99_give_timebase,
        .take_timebase  = smp_core99_take_timebase,
 };
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       /* XXX reset cpu affinity here */
+       openpic_set_priority(0xf);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       mb();
+       udelay(20);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
+static int cpu_dead[NR_CPUS];
+
+void cpu_die(void)
+{
+       local_irq_disable();
+       cpu_dead[smp_processor_id()] = 1;
+       mb();
+       low_cpu_die();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       int timeout;
+
+       timeout = 1000;
+       while (!cpu_dead[cpu]) {
+               if (--timeout == 0) {
+                       printk("CPU %u refused to die!\n", cpu);
+                       break;
+               }
+               msleep(1);
+       }
+       cpu_callin_map[cpu] = 0;
+       cpu_dead[cpu] = 0;
+}
+
+#endif
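
The new hotplug path hands off through a simple flag: the dying CPU raises cpu_dead[] behind a barrier and parks in low_cpu_die(), while __cpu_die() polls with a bounded timeout. A standalone pthread model of that handshake; timing and names are illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile int dead;               /* toy cpu_dead[] slot */

static void *dying_cpu(void *arg)
{
        (void)arg;
        /* ... interrupts off, state torn down ... */
        dead = 1;
        __sync_synchronize();           /* analogue of the mb() above */
        return NULL;                    /* stands in for low_cpu_die() */
}

int main(void)
{
        pthread_t t;
        int timeout = 1000;

        pthread_create(&t, NULL, dying_cpu, NULL);
        while (!dead && --timeout)
                usleep(1000);           /* the msleep(1) poll in __cpu_die() */
        puts(timeout ? "cpu dead" : "CPU refused to die!");
        dead = 0;                       /* re-arm for the next offline */
        pthread_join(t, NULL);
        return 0;
}
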
index 7747098..75fe8eb 100644 (file)
@@ -90,14 +90,10 @@ cpc700_mask_and_ack_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cpc700_pic = {
-       "CPC700 PIC",
-       NULL,
-       NULL,
-       cpc700_unmask_irq,
-       cpc700_mask_irq,
-       cpc700_mask_and_ack_irq,
-       NULL,
-       NULL
+       .typename = "CPC700 PIC",
+       .enable = cpc700_unmask_irq,
+       .disable = cpc700_mask_irq,
+       .ack = cpc700_mask_and_ack_irq,
 };
 
 __init static void
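
This file, and every hw_interrupt_type below, switches from positional to C99 designated initializers: members are named explicitly, declaration order stops mattering, and omitted callbacks default to NULL without placeholder entries. A standalone illustration with a toy ops struct (all names made up):

#include <stdio.h>

struct ops {
        const char *name;
        void (*startup)(void);
        void (*enable)(void);
        void (*disable)(void);
};

static void on(void)  { puts("enable"); }
static void off(void) { puts("disable"); }

static struct ops pic = {
        .name    = "toy PIC",
        .enable  = on,
        .disable = off,
        /* .startup is implicitly NULL - no positional placeholder needed */
};

int main(void)
{
        printf("%s: startup=%p\n", pic.name, (void *)pic.startup);
        pic.enable();
        pic.disable();
        return 0;
}
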
index b9391e6..5c7908c 100644 (file)
@@ -129,14 +129,11 @@ static void i8259_end_irq(unsigned int irq)
 }
 
 struct hw_interrupt_type i8259_pic = {
-       " i8259    ",
-       NULL,
-       NULL,
-       i8259_unmask_irq,
-       i8259_mask_irq,
-       i8259_mask_and_ack_irq,
-       i8259_end_irq,
-       NULL
+       .typename = " i8259    ",
+       .enable = i8259_unmask_irq,
+       .disable = i8259_mask_irq,
+       .ack = i8259_mask_and_ack_irq,
+       .end = i8259_end_irq,
 };
 
 static struct resource pic1_iores = {
index 7e272c5..2e0ea92 100644 (file)
@@ -82,13 +82,11 @@ static void openpic2_end_irq(unsigned int irq_nr);
 static void openpic2_ack_irq(unsigned int irq_nr);
 
 struct hw_interrupt_type open_pic2 = {
-       " OpenPIC2 ",
-       NULL,
-       NULL,
-       openpic2_enable_irq,
-       openpic2_disable_irq,
-       openpic2_ack_irq,
-       openpic2_end_irq,
+       .typename = " OpenPIC2 ",
+       .enable = openpic2_enable_irq,
+       .disable = openpic2_disable_irq,
+       .ack = openpic2_ack_irq,
+       .end = openpic2_end_irq,
 };
 
 /*
index 06cb0af..ce4d1de 100644 (file)
@@ -34,13 +34,10 @@ static void ppc403_aic_disable(unsigned int irq);
 static void ppc403_aic_disable_and_ack(unsigned int irq);
 
 static struct hw_interrupt_type ppc403_aic = {
-       "403GC AIC",
-       NULL,
-       NULL,
-       ppc403_aic_enable,
-       ppc403_aic_disable,
-       ppc403_aic_disable_and_ack,
-       0
+       .typename = "403GC AIC",
+       .enable = ppc403_aic_enable,
+       .disable = ppc403_aic_disable,
+       .ack = ppc403_aic_disable_and_ack,
 };
 
 int
index e0bd66f..2cbcad2 100644 (file)
@@ -79,14 +79,11 @@ xilinx_intc_end(unsigned int irq)
 }
 
 static struct hw_interrupt_type xilinx_intc = {
-       "Xilinx Interrupt Controller",
-       NULL,
-       NULL,
-       xilinx_intc_enable,
-       xilinx_intc_disable,
-       xilinx_intc_disable_and_ack,
-       xilinx_intc_end,
-       0
+       .typename = "Xilinx Interrupt Controller",
+       .enable = xilinx_intc_enable,
+       .disable = xilinx_intc_disable,
+       .ack = xilinx_intc_disable_and_ack,
+       .end = xilinx_intc_end,
 };
 
 int
index 0a23aea..17d2c1e 100644 (file)
@@ -56,7 +56,7 @@ LDFLAGS_vmlinux       := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
 CFLAGS         += -msoft-float -pipe -mminimal-toc -mtraceback=none \
                   -mcall-aixdesc
 # Temporary hack until we have migrated to asm-powerpc
-CPPFLAGS       += -Iinclude3
+CPPFLAGS       += -Iarch/$(ARCH)/include
 
 GCC_VERSION     := $(call cc-version)
 GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
@@ -115,14 +115,15 @@ all: $(KBUILD_IMAGE)
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi;
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 define archhelp
   echo  '  zImage.vmode        - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)'
index bfce609..1fb673c 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 6e8d859..7b9d4da 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index ef70ef9..033643a 100644 (file)
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void __spin_yield(raw_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;
 
-       lock_value = lock->lock;
+       lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
@@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock)
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
-       if (lock->lock != lock_value)
+       if (lock->slock != lock_value)
                return;         /* something has changed */
 #ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(rwlock_t *rw)
+void __rw_yield(raw_rwlock_t *rw)
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
@@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw)
 }
 #endif
 
-void spin_unlock_wait(spinlock_t *lock)
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-       while (lock->lock) {
+       while (lock->slock) {
                HMT_low();
                if (SHARED_PROCESSOR)
                        __spin_yield(lock);
@@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock)
        HMT_medium();
 }
 
-EXPORT_SYMBOL(spin_unlock_wait);
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
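
These hunks track the generic spinlock rework that split the arch-level type out as raw_spinlock_t with an slock field; the shared-processor yield logic itself is unchanged. As the code above shows, a nonzero lock word names the holding virtual CPU in its low 16 bits, so a spinner can donate its timeslice to the holder. A standalone sketch of just that decode (what the upper bits carry is not shown in this hunk and is not assumed here):

#include <stdio.h>

/* 0 means unlocked; otherwise the low 16 bits name the holding CPU. */
static int lock_holder(unsigned int lock_value)
{
        if (lock_value == 0)
                return -1;              /* nobody holds it */
        return (int)(lock_value & 0xffff);
}

int main(void)
{
        printf("%d\n", lock_holder(0));         /* -1: unlocked */
        printf("%d\n", lock_holder(0x10003));   /* held by CPU 3 */
        return 0;
}
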
index 888b559..2dc14e9 100644 (file)
@@ -36,7 +36,7 @@ _diag44(void)
 }
 
 void
-_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
+_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
 int
-_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
+_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
 void
-_raw_read_lock_wait(rwlock_t *rw)
+_raw_read_lock_wait(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int
-_raw_read_trylock_retry(rwlock_t *rw)
+_raw_read_trylock_retry(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void
-_raw_write_lock_wait(rwlock_t *rw)
+_raw_write_lock_wait(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
@@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int
-_raw_write_trylock_retry(rwlock_t *rw)
+_raw_write_trylock_retry(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
index ca91bb0..c0973f8 100644 (file)
@@ -37,13 +37,13 @@ static void end_maskreg_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type maskreg_irq_type = {
-       " Mask Register",
-       startup_maskreg_irq,
-       shutdown_maskreg_irq,
-       enable_maskreg_irq,
-       disable_maskreg_irq,
-       mask_and_ack_maskreg,
-       end_maskreg_irq
+       .typename = " Mask Register",
+       .startup = startup_maskreg_irq,
+       .shutdown = shutdown_maskreg_irq,
+       .enable = enable_maskreg_irq,
+       .disable = disable_maskreg_irq,
+       .ack = mask_and_ack_maskreg,
+       .end = end_maskreg_irq
 };
 
 /* actual implementation */
index 697144d..a9fde78 100644 (file)
@@ -37,10 +37,6 @@ static u8 bigsur_iomap_lo_shift[BIGSUR_IOMAP_LO_NMAP];
 static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP];
 static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
 {
        u32 port, endport = baseport + nports;
@@ -57,7 +53,7 @@ void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
                addr += (1<<(BIGSUR_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                pr_debug("    maphi[0x%x] = 0x%08x\n", port, addr);
@@ -80,7 +76,7 @@ void bigsur_port_unmap(u32 baseport, u32 nports)
                bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0;
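
The MAX() macro deleted above evaluated its arguments twice and did nothing about mixed signedness; max_t() evaluates each argument exactly once at an explicitly named type. A standalone demonstration (the max_t definition below is a minimal stand-in for the kernel macro, using the same GNU statement-expression style):

#include <stdio.h>

#define MAX(a,b)        ((a)>(b)?(a):(b))
#define max_t(type,a,b) ({ type _a = (a), _b = (b); _a > _b ? _a : _b; })

int main(void)
{
        int i = 10;
        int m = MAX(i++, 5);    /* i++ evaluated twice on the winning side */

        printf("m=%d i=%d\n", m, i);    /* m=11 i=12, not the expected m=10 i=11 */
        printf("%u\n", max_t(unsigned int, 3, 7u));     /* single evaluation */
        return 0;
}
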
index c188fc3..6ddbcc7 100644 (file)
@@ -228,23 +228,23 @@ static void shutdown_bigsur_irq(unsigned int irq)
 
 /* Define the IRQ structures for the L1 and L2 IRQ types */
 static struct hw_interrupt_type bigsur_l1irq_type = {
-        "BigSur-CPLD-Level1-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l1irq,
-        disable_bigsur_l1irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename  = "BigSur-CPLD-Level1-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown = shutdown_bigsur_irq,
+       .enable = enable_bigsur_l1irq,
+       .disable = disable_bigsur_l1irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 static struct hw_interrupt_type bigsur_l2irq_type = {
-        "BigSur-CPLD-Level2-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l2irq,
-        disable_bigsur_l2irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename  = "BigSur-CPLD-Level2-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown  =shutdown_bigsur_irq,
+       .enable = enable_bigsur_l2irq,
+       .disable = disable_bigsur_l2irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 
index fa6cfe5..d1da0d8 100644 (file)
@@ -83,13 +83,13 @@ static void shutdown_cqreek_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cqreek_irq_type = {
-       "CqREEK-IRQ",
-       startup_cqreek_irq,
-       shutdown_cqreek_irq,
-       enable_cqreek_irq,
-       disable_cqreek_irq,
-       mask_and_ack_cqreek,
-       end_cqreek_irq
+       .typename = "CqREEK-IRQ",
+       .startup = startup_cqreek_irq,
+       .shutdown = shutdown_cqreek_irq,
+       .enable = enable_cqreek_irq,
+       .disable = disable_cqreek_irq,
+       .ack = mask_and_ack_cqreek,
+       .end = end_cqreek_irq
 };
 
 int cqreek_has_ide, cqreek_has_isa;
index acd5848..52d0ba3 100644 (file)
@@ -39,13 +39,13 @@ static unsigned int startup_harp_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type harp_irq_type = {
-       "Harp-IRQ",
-       startup_harp_irq,
-       shutdown_harp_irq,
-       enable_harp_irq,
-       disable_harp_irq,
-       mask_and_ack_harp,
-       end_harp_irq
+       .typename = "Harp-IRQ",
+       .startup = startup_harp_irq,
+       .shutdown = shutdown_harp_irq,
+       .enable = enable_harp_irq,
+       .disable = disable_harp_irq,
+       .ack = mask_and_ack_harp,
+       .end = end_harp_irq
 };
 
 static void disable_harp_irq(unsigned int irq)
index 23adc6b..715e8fe 100644 (file)
@@ -86,13 +86,13 @@ static unsigned int startup_od_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type od_irq_type = {
-       "Overdrive-IRQ",
-       startup_od_irq,
-       shutdown_od_irq,
-       enable_od_irq,
-       disable_od_irq,
-       mask_and_ack_od,
-       end_od_irq
+       .typename = "Overdrive-IRQ",
+       .startup = startup_od_irq,
+       .shutdown = shutdown_od_irq,
+       .enable = enable_od_irq,
+       .disable = disable_od_irq,
+       .ack = mask_and_ack_od,
+       .end = end_od_irq
 };
 
 static void disable_od_irq(unsigned int irq)
index a7921f6..ed4c5b5 100644 (file)
@@ -74,13 +74,13 @@ static void end_hs7751rvoip_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type hs7751rvoip_irq_type = {
-       "HS7751RVoIP IRQ",
-       startup_hs7751rvoip_irq,
-       shutdown_hs7751rvoip_irq,
-       enable_hs7751rvoip_irq,
-       disable_hs7751rvoip_irq,
-       ack_hs7751rvoip_irq,
-       end_hs7751rvoip_irq,
+       .typename =  "HS7751RVoIP IRQ",
+       .startup = startup_hs7751rvoip_irq,
+       .shutdown = shutdown_hs7751rvoip_irq,
+       .enable = enable_hs7751rvoip_irq,
+       .disable = disable_hs7751rvoip_irq,
+       .ack = ack_hs7751rvoip_irq,
+       .end = end_hs7751rvoip_irq,
 };
 
 static void make_hs7751rvoip_irq(unsigned int irq)
index 95717f4..d36c937 100644 (file)
@@ -88,13 +88,13 @@ static void end_rts7751r2d_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type rts7751r2d_irq_type = {
-       "RTS7751R2D IRQ",
-       startup_rts7751r2d_irq,
-       shutdown_rts7751r2d_irq,
-       enable_rts7751r2d_irq,
-       disable_rts7751r2d_irq,
-       ack_rts7751r2d_irq,
-       end_rts7751r2d_irq,
+       .typename = "RTS7751R2D IRQ",
+       .startup = startup_rts7751r2d_irq,
+       .shutdown = shutdown_rts7751r2d_irq,
+       .enable = enable_rts7751r2d_irq,
+       .disable = disable_rts7751r2d_irq,
+       .ack = ack_rts7751r2d_irq,
+       .end = end_rts7751r2d_irq,
 };
 
 static void make_rts7751r2d_irq(unsigned int irq)
index 5675a41..7a2eb10 100644 (file)
@@ -35,13 +35,13 @@ static void end_systemh_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type systemh_irq_type = {
-       " SystemH Register",
-       startup_systemh_irq,
-       shutdown_systemh_irq,
-       enable_systemh_irq,
-       disable_systemh_irq,
-       mask_and_ack_systemh,
-       end_systemh_irq
+       .typename = " SystemH Register",
+       .startup = startup_systemh_irq,
+       .shutdown = shutdown_systemh_irq,
+       .enable = enable_systemh_irq,
+       .disable = disable_systemh_irq,
+       .ack = mask_and_ack_systemh,
+       .end = end_systemh_irq
 };
 
 static unsigned int startup_systemh_irq(unsigned int irq)
index 1298883..1395c1e 100644 (file)
@@ -83,13 +83,13 @@ static unsigned int startup_microdev_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type microdev_irq_type = {
-       "MicroDev-IRQ",
-       startup_microdev_irq,
-       shutdown_microdev_irq,
-       enable_microdev_irq,
-       disable_microdev_irq,
-       mask_and_ack_microdev,
-       end_microdev_irq
+       .typename = "MicroDev-IRQ",
+       .startup = startup_microdev_irq,
+       .shutdown = shutdown_microdev_irq,
+       .enable = enable_microdev_irq,
+       .disable = disable_microdev_irq,
+       .ack = mask_and_ack_microdev,
+       .end = end_microdev_irq
 };
 
 static void disable_microdev_irq(unsigned int irq)
index 99ac709..84cb142 100644 (file)
@@ -48,10 +48,6 @@ static unsigned char hd64465_iomap_lo_shift[HD64465_IOMAP_LO_NMAP];
 static unsigned long   hd64465_iomap_hi[HD64465_IOMAP_HI_NMAP];
 static unsigned char   hd64465_iomap_hi_shift[HD64465_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 #define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x))
 
 void hd64465_port_map(unsigned short baseport, unsigned int nports,
@@ -71,7 +67,7 @@ void hd64465_port_map(unsigned short baseport, unsigned int nports,
            addr += (1<<(HD64465_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            DPRINTK("    maphi[0x%x] = 0x%08lx\n", port, addr);
@@ -95,7 +91,7 @@ void hd64465_port_unmap(unsigned short baseport, unsigned int nports)
            hd64465_iomap_lo[port>>HD64465_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            hd64465_iomap_hi[port>>HD64465_IOMAP_HI_SHIFT] = 0;
index 3079234..1b6ac52 100644 (file)
@@ -87,13 +87,13 @@ static void shutdown_voyagergx_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type voyagergx_irq_type = {
-       "VOYAGERGX-IRQ",
-       startup_voyagergx_irq,
-       shutdown_voyagergx_irq,
-       enable_voyagergx_irq,
-       disable_voyagergx_irq,
-       mask_and_ack_voyagergx,
-       end_voyagergx_irq,
+       .typename = "VOYAGERGX-IRQ",
+       .startup = startup_voyagergx_irq,
+       .shutdown = shutdown_voyagergx_irq,
+       .enable = enable_voyagergx_irq,
+       .disable = disable_voyagergx_irq,
+       .ack = mask_and_ack_voyagergx,
+       .end = end_voyagergx_irq,
 };
 
 static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
index f76901e..a963d00 100644 (file)
@@ -46,13 +46,13 @@ static unsigned int startup_imask_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type imask_irq_type = {
-       "SR.IMASK",
-       startup_imask_irq,
-       shutdown_imask_irq,
-       enable_imask_irq,
-       disable_imask_irq,
-       mask_and_ack_imask,
-       end_imask_irq
+       .typename = "SR.IMASK",
+       .startup = startup_imask_irq,
+       .shutdown = shutdown_imask_irq,
+       .enable = enable_imask_irq,
+       .disable = disable_imask_irq,
+       .ack = mask_and_ack_imask,
+       .end = end_imask_irq
 };
 
 void static inline set_interrupt_registers(int ip)
index 7ea3d2d..71f9209 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_ipr_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type ipr_irq_type = {
-       "IPR-IRQ",
-       startup_ipr_irq,
-       shutdown_ipr_irq,
-       enable_ipr_irq,
-       disable_ipr_irq,
-       mask_and_ack_ipr,
-       end_ipr_irq
+       .typename = "IPR-IRQ",
+       .startup = startup_ipr_irq,
+       .shutdown = shutdown_ipr_irq,
+       .enable = enable_ipr_irq,
+       .disable = disable_ipr_irq,
+       .ack = mask_and_ack_ipr,
+       .end = end_ipr_irq
 };
 
 static void disable_ipr_irq(unsigned int irq)
@@ -142,13 +142,13 @@ static unsigned int startup_pint_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type pint_irq_type = {
-       "PINT-IRQ",
-       startup_pint_irq,
-       shutdown_pint_irq,
-       enable_pint_irq,
-       disable_pint_irq,
-       mask_and_ack_pint,
-       end_pint_irq
+       .typename = "PINT-IRQ",
+       .startup = startup_pint_irq,
+       .shutdown = shutdown_pint_irq,
+       .enable = enable_pint_irq,
+       .disable = disable_pint_irq,
+       .ack = mask_and_ack_pint,
+       .end = end_pint_irq
 };
 
 static void disable_pint_irq(unsigned int irq)
index 099ebbf..f6b16ba 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_intc2_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type intc2_irq_type = {
-       "INTC2-IRQ",
-       startup_intc2_irq,
-       shutdown_intc2_irq,
-       enable_intc2_irq,
-       disable_intc2_irq,
-       mask_and_ack_intc2,
-       end_intc2_irq
+       .typename = "INTC2-IRQ",
+       .startup = startup_intc2_irq,
+       .shutdown = shutdown_intc2_irq,
+       .enable = enable_intc2_irq,
+       .disable = disable_intc2_irq,
+       .ack = mask_and_ack_intc2,
+       .end = end_intc2_irq
 };
 
 static void disable_intc2_irq(unsigned int irq)
index 43f88f3..fc99bf4 100644 (file)
@@ -107,13 +107,13 @@ static void mask_and_ack_intc(unsigned int);
 static void end_intc_irq(unsigned int irq);
 
 static struct hw_interrupt_type intc_irq_type = {
-       "INTC",
-       startup_intc_irq,
-       shutdown_intc_irq,
-       enable_intc_irq,
-       disable_intc_irq,
-       mask_and_ack_intc,
-       end_intc_irq
+       .typename = "INTC",
+       .startup = startup_intc_irq,
+       .shutdown = shutdown_intc_irq,
+       .enable = enable_intc_irq,
+       .disable = disable_intc_irq,
+       .ack = mask_and_ack_intc,
+       .end = end_intc_irq
 };
 
 static int irlm;               /* IRL mode */
index 5d974a2..f848093 100644 (file)
@@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned)
 /* used by various drivers */
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_DEBUG_SPINLOCK
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-#endif
-#else
 // XXX find what uses (or used) these.
 EXPORT_SYMBOL(___rw_read_enter);
 EXPORT_SYMBOL(___rw_read_exit);
index 2296ff9..fa50069 100644 (file)
@@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
         strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
         copy_user.o locks.o atomic.o atomic32.o bitops.o \
         lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
-
-lib-$(CONFIG_DEBUG_SPINLOCK) +=        debuglocks.o
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
deleted file mode 100644 (file)
index fb18235..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>     /* For NR_CPUS */
-#include <linux/spinlock.h>
-#include <asm/psr.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-/* Some notes on how these debugging routines work.  When a lock is acquired
- * an extra debugging member lock->owner_pc is set to the caller of the lock
- * acquisition routine.  Right before releasing a lock, the debugging program
- * counter is cleared to zero.
- *
- * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
- * number of the owner in the lowest two bits.
- */
-
-#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
-
-static inline void show(char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-
-       for(i = 0; i < NR_CPUS; i++)
-               printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
-
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(val) {
-               while(lock->lock) {
-                       if (!--stuck) {
-                               show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto again;
-       }
-       lock->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-int _spin_trylock(spinlock_t *lock)
-{
-       unsigned long val;
-       unsigned long caller;
-       int cpu = smp_processor_id();
-
-       STORE_CALLER(caller);
-
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(!val) {
-               /* We got it, record our identity for debugging. */
-               lock->owner_pc = (cpu & 3) | (caller & ~3);
-       }
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       barrier();
-       lock->lock = 0;
-}
-
-void _do_read_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = caller;
-       barrier();
-       rw->lock++;
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = 0;
-       barrier();
-       rw->lock -= 0x1ff;
-}
-
-void _do_write_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-wlock_wait:
-               while(rw->lock) {
-                       if (!--stuck) {
-                               show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       if (rw->lock & ~0xff) {
-               *(((unsigned char *)&rw->lock)+3) = 0;
-               barrier();
-               goto wlock_wait;
-       }
-
-       barrier();
-       rw->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-void _do_write_unlock(rwlock_t *rw)
-{
-       rw->owner_pc = 0;
-       barrier();
-       rw->lock = 0;
-}
-
-#endif /* SMP */
index 6625543..7d10b03 100644 (file)
@@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        struct thread_info *t = p->thread_info;
        char *child_trap_frame;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       p->thread.smp_lock_count = 0;
-       p->thread.smp_lock_pc = 0;
-#endif
-
        /* Calculate offset to stack_frame & pt_regs */
        child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
index 7d9a0f6..cbb5e59 100644 (file)
@@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
 /* used by various drivers */
 #ifdef CONFIG_SMP
-#ifndef CONFIG_DEBUG_SPINLOCK
 /* Out of line rw-locking implementation. */
 EXPORT_SYMBOL(__read_lock);
 EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
-EXPORT_SYMBOL(_raw_spin_lock_flags);
-#endif
 
 /* Hard IRQ locking */
 EXPORT_SYMBOL(synchronize_irq);
index 40dbeec..d968aeb 100644 (file)
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
         copy_in_user.o user_fixup.o memmove.o \
         mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644 (file)
index f5f0b55..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-static inline void show (char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->owner_pc, lock->owner_cpu);
-}
-
-static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->writer_pc, lock->writer_cpu);
-}
-
-static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08x\n",
-              str, lock, cpu, (unsigned int) caller);
-       printk("Writer: PC(%08x):CPU(%x)\n",
-              lock->writer_pc, lock->writer_cpu);
-       printk("Readers:");
-       for (i = 0; i < NR_CPUS; i++)
-               if (lock->reader_pc[i])
-                       printk(" %d[%08x]", i, lock->reader_pc[i]);
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (val) {
-               while (lock->lock) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto again;
-       }
-       lock->owner_pc = ((unsigned int)caller);
-       lock->owner_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (!val) {
-               lock->owner_pc = ((unsigned int)caller);
-               lock->owner_cpu = cpu;
-               current->thread.smp_lock_count++;
-               current->thread.smp_lock_pc = ((unsigned int)caller);
-       }
-
-       put_cpu();
-
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       lock->owner_cpu = NO_PROC_ID;
-       membar_storestore_loadstore();
-       lock->lock = 0;
-       current->thread.smp_lock_count--;
-}
-
-/* Keep INIT_STUCK the same... */
-
-void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Wait for any writer to go away.  */
-       while (((long)(rw->lock)) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-       /* Try once to increment the counter.  */
-       __asm__ __volatile__(
-"      ldx             [%0], %%g1\n"
-"      brlz,a,pn       %%g1, 2f\n"
-"       mov            1, %0\n"
-"      add             %%g1, 1, %%g7\n"
-"      casx            [%0], %%g1, %%g7\n"
-"      sub             %%g1, %%g7, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       membar_storeload_storestore();
-       if (val)
-               goto wlock_again;
-       rw->reader_pc[cpu] = ((unsigned int)caller);
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-       /* Drop our identity _first_. */
-       rw->reader_pc[cpu] = 0;
-       current->thread.smp_lock_count--;
-runlock_again:
-       /* Spin trying to decrement the counter using casx.  */
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      ldx     [%0], %%g1\n"
-"      sub     %%g1, 1, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto runlock_again;
-       }
-
-       put_cpu();
-}
-
-void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Spin while there is another writer. */
-       while (((long)rw->lock) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               /* We couldn't get the write bit. */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, spin, and try again.
-                */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-               while(rw->lock != 0) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto wlock_again;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_write_unlock(rwlock_t *rw, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int shown = 0;
-
-       /* Drop our identity _first_ */
-       rw->writer_pc = 0;
-       rw->writer_cpu = NO_PROC_ID;
-       current->thread.smp_lock_count--;
-wlock_again:
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      andn    %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write("write_unlock", rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-}
-
-int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-
-       if (val) {
-               put_cpu();
-               return 0;
-       }
-
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, return failure.
-                */
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-
-               put_cpu();
-
-               return 0;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-
-       return 1;
-}
-
-#endif /* CONFIG_SMP */
index 577b8d1..154803a 100644 (file)
@@ -103,7 +103,6 @@ endef
 
 ifneq ($(KBUILD_SRC),)
 $(shell mkdir -p $(ARCH_DIR) && ln -fsn $(srctree)/$(ARCH_DIR)/Kconfig.$(SUBARCH) $(ARCH_DIR)/Kconfig.arch)
-CLEAN_FILES += $(ARCH_DIR)/Kconfig.arch
 else
 $(shell cd $(ARCH_DIR) && ln -sf Kconfig.$(SUBARCH) Kconfig.arch)
 endif
@@ -144,14 +143,14 @@ endef
 #TT or skas makefiles and don't clean skas_ptregs.h.
 CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
        $(GEN_HEADERS) $(ARCH_DIR)/include/skas_ptregs.h \
-       $(ARCH_DIR)/include/user_constants.h
+       $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/Kconfig.arch
 
 MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
-       $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os \
-       $(ARCH_DIR)/Kconfig.arch
+       $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/util
+       $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/os-$(OS)/util
        @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
                -o -name '*.gcov' \) -type f -print | xargs rm -f
 
index 99d3ad4..e8ff0d8 100644 (file)
@@ -13,7 +13,17 @@ extern int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w);
 extern int is_remapped(void *virt);
 extern int physmem_remove_mapping(void *virt);
 extern void physmem_forget_descriptor(int fd);
-extern unsigned long to_phys(void *virt);
+
+extern unsigned long uml_physmem;
+static inline unsigned long to_phys(void *virt)
+{
+       return(((unsigned long) virt) - uml_physmem);
+}
+
+static inline void *to_virt(unsigned long phys)
+{
+       return((void *) uml_physmem + phys);
+}
 
 #endif
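
Turning to_phys()/to_virt() into header inlines removes a function call (and, below, the symbol exports) for what is plain offset arithmetic against the base of the physical-memory mapping. A standalone model; the uml_physmem value is made up for illustration:

#include <stdio.h>

static unsigned long uml_physmem = 0x60000000;  /* made-up base address */

static inline unsigned long to_phys(void *virt)
{
        return (unsigned long)virt - uml_physmem;
}

static inline void *to_virt(unsigned long phys)
{
        return (void *)(uml_physmem + phys);
}

int main(void)
{
        void *v = (void *)0x60001000UL;
        unsigned long p = to_phys(v);

        printf("virt=%p phys=0x%lx back=%p\n", v, p, to_virt(p));
        return 0;
}
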
 
index 3942a5f..2517ecb 100644 (file)
@@ -146,37 +146,8 @@ SECTIONS
   }
   _end = .;
   PROVIDE (end = .);
-   /* Stabs debugging sections.  */
-  .stab          0 : { *(.stab) }
-  .stabstr       0 : { *(.stabstr) }
-  .stab.excl     0 : { *(.stab.excl) }
-  .stab.exclstr  0 : { *(.stab.exclstr) }
-  .stab.index    0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment       0 : { *(.comment) }
-  /* DWARF debug sections.
-     Symbols in the DWARF debugging sections are relative to the beginning
-     of the section so we begin them at 0.  */
-  /* DWARF 1 */
-  .debug          0 : { *(.debug) }
-  .line           0 : { *(.line) }
-  /* GNU DWARF 1 extensions */
-  .debug_srcinfo  0 : { *(.debug_srcinfo) }
-  .debug_sfnames  0 : { *(.debug_sfnames) }
-  /* DWARF 1.1 and DWARF 2 */
-  .debug_aranges  0 : { *(.debug_aranges) }
-  .debug_pubnames 0 : { *(.debug_pubnames) }
-  /* DWARF 2 */
-  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
-  .debug_abbrev   0 : { *(.debug_abbrev) }
-  .debug_line     0 : { *(.debug_line) }
-  .debug_frame    0 : { *(.debug_frame) }
-  .debug_str      0 : { *(.debug_str) }
-  .debug_loc      0 : { *(.debug_loc) }
-  .debug_macinfo  0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames  0 : { *(.debug_varnames) }
+
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 32d3076..a97a72e 100644 (file)
@@ -34,14 +34,9 @@ EXPORT_SYMBOL(host_task_size);
 EXPORT_SYMBOL(arch_validate);
 EXPORT_SYMBOL(get_kmem_end);
 
-EXPORT_SYMBOL(page_to_phys);
-EXPORT_SYMBOL(phys_to_page);
 EXPORT_SYMBOL(high_physmem);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(um_virt_to_phys);
-EXPORT_SYMBOL(__virt_to_page);
-EXPORT_SYMBOL(to_phys);
-EXPORT_SYMBOL(to_virt);
 EXPORT_SYMBOL(mode_tt);
 EXPORT_SYMBOL(handle_page_fault);
 EXPORT_SYMBOL(find_iomem);
index a24e3b7..ea670fc 100644 (file)
@@ -248,16 +248,6 @@ unsigned long high_physmem;
 
 extern unsigned long physmem_size;
 
-void *to_virt(unsigned long phys)
-{
-       return((void *) uml_physmem + phys);
-}
-
-unsigned long to_phys(void *virt)
-{
-       return(((unsigned long) virt) - uml_physmem);
-}
-
 int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 {
        struct page *p, *map;
@@ -298,31 +288,6 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
        return(0);
 }
 
-struct page *phys_to_page(const unsigned long phys)
-{
-       return(&mem_map[phys >> PAGE_SHIFT]);
-}
-
-struct page *__virt_to_page(const unsigned long virt)
-{
-       return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
-}
-
-phys_t page_to_phys(struct page *page)
-{
-       return((page - mem_map) << PAGE_SHIFT);
-}
-
-pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
-       pte_t pte;
-
-       pte_set_val(pte, page_to_phys(page), pgprot);
-       if(pte_present(pte))
-               pte_mknewprot(pte_mknewpage(pte));
-       return(pte);
-}
-
 /* Changed during early boot */
 static unsigned long kmem_top = 0;
 
index b5fc89f..87cc6fd 100644
@@ -57,7 +57,8 @@ good_area:
        if(is_write && !(vma->vm_flags & VM_WRITE)) 
                goto out;
 
-        if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+       /* Don't require VM_READ|VM_EXEC for write faults! */
+        if(!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
                 goto out;
 
        do {
@@ -84,8 +85,7 @@ survive:
                pte = pte_offset_kernel(pmd, address);
        } while(!pte_present(*pte));
        err = 0;
-       *pte = pte_mkyoung(*pte);
-       if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
+       WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
        flush_tlb_page(vma, address);
 out:
        up_read(&mm->mmap_sem);
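
This hunk makes two behavioural fixes: the VM_READ|VM_EXEC test is now skipped for write faults, so a store into a writable but unreadable mapping no longer fails, and the handler merely WARNs if the accessed/dirty bits were left unset, since the generic fault path is expected to have set them already. A sketch of the intended permission check, using only names from the hunk:

/* is_write: the fault was a store; vma: the mapping that faulted. */
if (is_write) {
        if (!(vma->vm_flags & VM_WRITE))
                goto out;               /* store into a read-only mapping */
} else {
        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                goto out;               /* load/fetch without read permission */
}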
index b03326d..af11915 100644
@@ -93,14 +93,10 @@ SECTIONS
    *(.bss)
    *(COMMON)
   }
-  _end = . ;
+  _end = .;
   PROVIDE (end = .);
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
+
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 336cbf2..9e85969 100644
@@ -67,13 +67,13 @@ static void ack_none(unsigned int irq)
 #define end_none       enable_none
 
 struct hw_interrupt_type no_irq_type = {
-       "none",
-       startup_none,
-       shutdown_none,
-       enable_none,
-       disable_none,
-       ack_none,
-       end_none
+       .typename = "none",
+       .startup = startup_none,
+       .shutdown = shutdown_none,
+       .enable = enable_none,
+       .disable = disable_none,
+       .ack = ack_none,
+       .end = end_none
 };
 
 volatile unsigned long irq_err_count, spurious_count;
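
These hw_interrupt_type tables (here and in the two v850 files that follow) move to C99 designated initializers: each member is bound by name, so the table stays correct if fields are reordered or new ones inserted, and omitted members are zero-filled. A generic sketch of the idiom; the struct and values are illustrative, not from the tree:

struct example_ops {
        const char *typename;
        unsigned int (*startup)(unsigned int irq);
        void (*shutdown)(unsigned int irq);
        void (*ack)(unsigned int irq);
};

static struct example_ops ops = {
        .typename = "example",
        .startup  = NULL,       /* members may appear in any order... */
        .ack      = NULL,       /* ...and omitted ones default to zero */
};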
index abd4840..62bdb8d 100644
@@ -138,13 +138,13 @@ static void nmi_end (unsigned irq)
 }
 
 static struct hw_interrupt_type nmi_irq_type = {
-       "NMI",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       nmi_end,                /* end */
+       .typename = "NMI",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = nmi_end,         /* end */
 };
 
 void __init init_IRQ (void)
index e2cc558..17049aa 100644
@@ -73,13 +73,13 @@ static void irq_nop (unsigned irq) { }
 static unsigned irq_zero (unsigned irq) { return 0; }
 
 static struct hw_interrupt_type sim_irq_type = {
-       "IRQ",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       irq_nop,                /* end */
+       .typename = "IRQ",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = irq_nop,         /* end */
 };
 
 void __init mach_init_irqs (void)
index 0511d80..9aec524 100644
@@ -929,7 +929,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
        c->x86_num_cores = intel_num_cpu_cores(c);
 }
 
-void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
 
index d4abb07..6dd642c 100644
@@ -194,20 +194,7 @@ SECTIONS
 #endif
        }
 
-  /* DWARF 2 */
-  .debug_info     0 : { *(.debug_info) }
-  .debug_abbrev   0 : { *(.debug_abbrev) }
-  .debug_line     0 : { *(.debug_line) }
-  .debug_frame    0 : { *(.debug_frame) }
-  .debug_str      0 : { *(.debug_str) }
-  .debug_loc      0 : { *(.debug_loc) }
-  .debug_macinfo  0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames  0 : { *(.debug_varnames) }
-
-
-  .comment 0 : { *(.comment) }
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 7249ba2..aee50b4 100644
@@ -23,7 +23,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static struct pm_ops acpi_pm_ops;
 
-extern void do_suspend_lowlevel_s4bios(void);
 extern void do_suspend_lowlevel(void);
 
 static u32 acpi_suspend_states[] = {
@@ -98,8 +97,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
        case PM_SUSPEND_DISK:
                if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
                        status = acpi_enter_sleep_state(acpi_state);
-               else
-                       do_suspend_lowlevel_s4bios();
                break;
        case PM_SUSPEND_MAX:
                acpi_power_off();
@@ -206,11 +203,6 @@ static int __init acpi_sleep_init(void)
                        printk(" S%d", i);
                }
                if (i == ACPI_STATE_S4) {
-                       if (acpi_gbl_FACS->S4bios_f) {
-                               sleep_states[i] = 1;
-                               printk(" S4bios");
-                               acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE;
-                       }
                        if (sleep_states[i])
                                acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
                }
index a5f947d..af7935a 100644
@@ -21,9 +21,7 @@ int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
        /* do we have a wakeup address for S2 and S3? */
-       /* Here, we support only S4BIOS, those we set the wakeup address */
-       /* S4OS is only supported for now via swsusp.. */
-       if (acpi_state == ACPI_STATE_S3 || acpi_state == ACPI_STATE_S4) {
+       if (acpi_state == ACPI_STATE_S3) {
                if (!acpi_wakeup_address) {
                        return -EFAULT;
                }
index 09a603f..4696a85 100644
@@ -25,8 +25,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
        for (i = 0; i <= ACPI_STATE_S5; i++) {
                if (sleep_states[i]) {
                        seq_printf(seq, "S%d ", i);
-                       if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f)
-                               seq_printf(seq, "S4bios ");
                }
        }
 
index c4aebf2..60a7ef6 100644
@@ -262,7 +262,8 @@ dma_pool_destroy (struct dma_pool *pool)
  * If such a memory block can't be allocated, null is returned.
  */
 void *
-dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
+               dma_addr_t *handle)
 {
        unsigned long           flags;
        struct dma_page         *page;
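
The widened dma_pool_alloc() prototype marks its allocation-flags argument unsigned int __nocast. __nocast is a sparse annotation (empty under plain gcc, defined in <linux/compiler.h>) that makes implicit integer conversions into the parameter warn at check time, so mismatched flag words get caught; it is the forerunner of the later gfp_t typedef. A hedged sketch of the pattern, with an illustrative prototype:

#include <linux/compiler.h>     /* __nocast expands only under sparse */

/* Hypothetical allocator: sparse flags callers that let a plain
 * integer convert silently into the __nocast-qualified argument. */
void *example_alloc(unsigned long size, unsigned int __nocast gfp_flags);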
index 418b146..28f2c17 100644
@@ -1713,10 +1713,9 @@ static unsigned long pollcomplete(int ctlr)
 
        for (i = 20 * HZ; i > 0; i--) {
                done = hba[ctlr]->access.command_completed(hba[ctlr]);
-               if (done == FIFO_EMPTY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
-               } else
+               if (done == FIFO_EMPTY)
+                       schedule_timeout_uninterruptible(1);
+               else
                        return (done);
        }
        /* Invalid address to tell caller we ran out of time */
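
schedule_timeout_uninterruptible() and its _interruptible twin, used throughout this merge, bundle the set_current_state() + schedule_timeout() pair the drivers used to open-code, which also removes the bug class where the task state is set once outside a retry loop. A sketch of the equivalence (the real helpers live in kernel/timer.c):

#include <linux/sched.h>

static signed long sketch_timeout_uninterruptible(signed long t)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);      /* ignore signals */
        return schedule_timeout(t);                     /* t is in jiffies */
}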
index 30c0903..cd056e7 100644
@@ -2260,6 +2260,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
        if (!atomic_dec_and_test(&cfqd->ref))
                return;
 
+       blk_put_queue(q);
+
        cfq_shutdown_timer_wq(cfqd);
        q->elevator->elevator_data = NULL;
 
@@ -2316,6 +2318,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        e->elevator_data = cfqd;
 
        cfqd->queue = q;
+       atomic_inc(&q->refcnt);
 
        cfqd->max_queued = q->nr_requests / 4;
        q->nr_batching = cfq_queued;
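
The cfq fix pairs a reference grab with a release across the elevator's lifetime: cfq_init_queue() pins the request queue so that cfq_put_cfqd() may still dereference it at teardown, and blk_put_queue() drops that pin when the last cfqd reference goes away. The shape of the pattern, sketched with illustrative function names:

#include <linux/blkdev.h>

static void example_attach(request_queue_t *q)
{
        atomic_inc(&q->refcnt);         /* pin the queue at attach time */
}

static void example_detach(request_queue_t *q)
{
        blk_put_queue(q);               /* paired release at detach time */
}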
index 7289f67..ac5ba46 100644
@@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
 
 static void pcd_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pcd_reset(struct pcd_unit *cd)
index 060b1f2..711d2f3 100644
@@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf)
 
 static void pf_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 /* the ATAPI standard actually specifies the contents of all 7 registers
index 84d8e29..b398239 100644
@@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev)
 
 static void pg_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
index 5fe8ee8..d8d3523 100644
@@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *
 
 static void pt_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
index e5f7494..e425ad3 100644
@@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs)
                        break;
                }
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
                if (swim3_readbit(fs, DISK_IN) == 0)
                        break;
        }
@@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
                                break;
                        }
                        swim3_select(fs, RELAX);
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                }
                if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
                                 || swim3_readbit(fs, DISK_IN) == 0))
@@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk)
                if (signal_pending(current))
                        break;
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
                || swim3_readbit(fs, DISK_IN) == 0;
index a1283f6..89e3c2f 100644
@@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs)
                        err = -EINTR;
                        break;
                }
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        release_drive(fs);
        return cmd->error;
index 0c4c121..0f48301 100644
@@ -34,6 +34,7 @@
  *                      - set initialised bit then.
  */
 
+//#define DEBUG /* uncomment if you want debugging info (pr_debug) */
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-#define PRINTK(x...) do {} while (0)
-#define dprintk(x...) do {} while (0)
-/*#define dprintk(x...) printk(x) */
-
 #define MM_MAXCARDS 4
 #define MM_RAHEAD 2      /* two sectors */
 #define MM_BLKSIZE 1024  /* 1k blocks */
@@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card)
 
        /* make the last descriptor end the chain */
        page = &card->mm_pages[card->Active];
-       PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
+       pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
        desc = &page->desc[page->cnt-1];
 
        desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
@@ -532,7 +529,7 @@ static void process_page(unsigned long data)
                activate(card);
        } else {
                /* haven't finished with this one yet */
-               PRINTK("do some more\n");
+               pr_debug("do some more\n");
                mm_start_io(card);
        }
  out_unlock:
@@ -555,7 +552,7 @@ static void process_page(unsigned long data)
 static int mm_make_request(request_queue_t *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
-       PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
+       pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
 
        bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
        spin_lock_irq(&card->lock);
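
pr_debug() replaces the umem driver's local PRINTK()/dprintk() wrappers: it prints at KERN_DEBUG when DEBUG is defined before the includes (hence the commented-out #define at the top of the hunk) and compiles to nothing otherwise — which is also why the stale bh->b_rsector reference in the mm_make_request message survives an ordinary build, since the arguments are never expanded. A sketch of the mechanism, simplified from <linux/kernel.h> of this era:

#ifdef DEBUG
#define pr_debug(fmt, arg...) \
        printk(KERN_DEBUG fmt, ##arg)
#else
#define pr_debug(fmt, arg...) \
        do { } while (0)
#endif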
index 1676033..68b6d7b 100644
@@ -47,6 +47,7 @@
 #include <linux/wait.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/delay.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, };
 
 #define XD_DONT_USE_DMA                0  /* Initial value. may be overriden using
                                      "nodma" module option */
-#define XD_INIT_DISK_DELAY     (30*HZ/1000)  /* 30 ms delay during disk initialization */
+#define XD_INIT_DISK_DELAY     (30)  /* 30 ms delay during disk initialization */
 
 /* Above may need to be increased if a problem with the 2nd drive detection
    (ST11M controller) or resetting a controller (WD) appears */
@@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t
        int success;
 
        xdc_busy = 1;
-       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
+               schedule_timeout_uninterruptible(1);
        xdc_busy = 0;
        return (success);
 }
@@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
        for (i = 0; i < XD_MAXDRIVES; i++) {
                xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
                if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
 
                        init_drive(count);
                        count++;
 
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
                }
        }
        return (count);
@@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address)
 
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_wd_init_drive (u_char drive)
@@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */
        xd_maxsectors = 0x01;
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_xebec_init_drive (u_char drive)
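
Note the unit change that rides along with the API change in xd.c: schedule_timeout() counts jiffies, so the old XD_INIT_DISK_DELAY was 30*HZ/1000, while msleep()/msleep_interruptible() take milliseconds, so the constant becomes a plain 30. Sketch of the two spellings of the same pause:

#include <linux/delay.h>
#include <linux/sched.h>

static void pause_30ms_old(void)        /* jiffies-based, pre-merge */
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(30 * HZ / 1000);       /* truncates toward zero */
}

static void pause_30ms_new(void)        /* milliseconds, post-merge */
{
        msleep(30);     /* sleeps at least 30 ms, rounding up to jiffies */
}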
index 007f6a6..bb5e8d6 100644
@@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp )
     return 0;
 
 err_out_kfree:
-    kfree( z2ram_map );
+    kfree(z2ram_map);
 err_out:
     return rc;
 }
index 30a8977..466e9c2 100644
@@ -827,8 +827,7 @@ static void mark_timeout_audio(u_long i)
 static void sbp_sleep(u_int time)
 {
        sti();
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(time);
+       schedule_timeout_interruptible(time);
        sti();
 }
 /*==========================================================================*/
@@ -4216,7 +4215,8 @@ static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd,
                
        case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
                msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
-               if (current_drive->sbp_audsiz>0) vfree(current_drive->aud_buf);
+               if (current_drive->sbp_audsiz>0)
+                       vfree(current_drive->aud_buf);
                current_drive->aud_buf=NULL;
                current_drive->sbp_audsiz=arg;
                
@@ -5910,7 +5910,8 @@ static void sbpcd_exit(void)
                put_disk(D_S[j].disk);
                devfs_remove("sbp/c0t%d", j);
                vfree(D_S[j].sbp_buf);
-               if (D_S[j].sbp_audsiz>0) vfree(D_S[j].aud_buf);
+               if (D_S[j].sbp_audsiz>0)
+                       vfree(D_S[j].aud_buf);
                if ((unregister_cdrom(D_S[j].sbpcd_infop) == -EINVAL))
                {
                        msg(DBG_INF, "What's that: can't unregister info %s.\n", major_name);
index 9f22e8f..e656599 100644
@@ -1478,8 +1478,7 @@ static int __init sony535_init(void)
        /* look for the CD-ROM, follows the procedure in the DOS driver */
        inb(select_unit_reg);
        /* wait for 40 18 Hz ticks (reverse-engineered from DOS driver) */
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout((HZ+17)*40/18);
+       schedule_timeout_interruptible((HZ+17)*40/18);
        inb(result_reg);
 
        outb(0, read_status_reg);       /* does a reset? */
index 4d4e602..82b43c5 100644
@@ -206,10 +206,9 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
                bridge->driver->cleanup();
        if (bridge->driver->free_gatt_table)
                bridge->driver->free_gatt_table(bridge);
-       if (bridge->key_list) {
-               vfree(bridge->key_list);
-               bridge->key_list = NULL;
-       }
+
+       vfree(bridge->key_list);
+       bridge->key_list = NULL;
 
        if (bridge->driver->agp_destroy_page &&
            bridge->driver->needs_scratch_page)
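
kfree() and vfree() both accept NULL and do nothing with it, so the if (ptr) guards dropped here (and in many hunks below) were pure noise. The surviving idiom, sketched with a hypothetical structure:

#include <linux/vmalloc.h>

struct example_state {
        void *table;            /* vmalloc()ed, or NULL if never allocated */
};

static void example_release(struct example_state *st)
{
        vfree(st->table);       /* vfree(NULL) is a safe no-op */
        st->table = NULL;       /* forestall a later double free */
}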
index 11f9ee5..927a5bb 100644
@@ -172,7 +172,7 @@ static int ac_register_board(unsigned long physloc, void __iomem *loc,
 
 void cleanup_module(void)
 {
-       int i;
+       unsigned int i;
 
        misc_deregister(&ac_miscdev);
 
@@ -195,7 +195,7 @@ int __init applicom_init(void)
        int i, numisa = 0;
        struct pci_dev *dev = NULL;
        void __iomem *RamIO;
-       int boardno;
+       int boardno, ret;
 
        printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
 
@@ -294,7 +294,8 @@ int __init applicom_init(void)
        }
 
        if (!numisa)
-               printk(KERN_WARNING"ac.o: No valid ISA Applicom boards found at mem 0x%lx\n",mem);
+               printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+                               "at mem 0x%lx\n", mem);
 
  fin:
        init_waitqueue_head(&FlagSleepRec);
@@ -304,7 +305,11 @@ int __init applicom_init(void)
        DeviceErrorCount = 0;
 
        if (numboards) {
-               misc_register(&ac_miscdev);
+               ret = misc_register(&ac_miscdev);
+               if (ret) {
+                       printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+                       goto out;
+               }
                for (i = 0; i < MAX_BOARD; i++) {
                        int serial;
                        char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
@@ -337,6 +342,17 @@ int __init applicom_init(void)
 
        else
                return -ENXIO;
+
+out:
+       for (i = 0; i < MAX_BOARD; i++) {
+               if (!apbs[i].RamIO)
+                       continue;
+               if (apbs[i].irq)
+                       free_irq(apbs[i].irq, &dummy);
+               iounmap(apbs[i].RamIO);
+       }
+       pci_disable_device(dev);
+       return ret;
 }
 
 
index 1704a2a..b2e0928 100644
@@ -387,10 +387,8 @@ int fdc_interrupt_wait(unsigned int time)
 
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftape_wait_intr, &wait);
-       while (!ft_interrupt_seen && timeout) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-        }
+       while (!ft_interrupt_seen && timeout)
+               timeout = schedule_timeout_interruptible(timeout);
 
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = old_sigmask;
index 5fe8461..de0379b 100644
@@ -100,14 +100,14 @@ static struct hpets *hpets;
 #endif
 
 #ifndef readq
-static unsigned long long __inline readq(void __iomem *addr)
+static inline unsigned long long readq(void __iomem *addr)
 {
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
 }
 #endif
 
 #ifndef writeq
-static void __inline writeq(unsigned long long v, void __iomem *addr)
+static inline void writeq(unsigned long long v, void __iomem *addr)
 {
        writel(v & 0xffffffff, addr);
        writel(v >> 32, addr + 4);
index 3480535..6f673d2 100644
@@ -513,10 +513,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
                        return ret ? : -EAGAIN;
 
                if(need_resched())
-               {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
-               }
+                       schedule_timeout_interruptible(1);
                else
                        udelay(200);    /* FIXME: We could poll for 250uS ?? */
 
index 82c5f30..ba85eb1 100644
@@ -655,8 +655,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
                        timeout--;   // So negative values == forever
                
                if (!in_interrupt()) {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);    // short nap 
+                       schedule_timeout_interruptible(1);      // short nap
                } else {
                        // we cannot sched/sleep in interrrupt silly
                        return 0;   
@@ -1132,8 +1131,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
 
                                        ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );
 
-                                       current->state = TASK_INTERRUPTIBLE;
-                                       schedule_timeout(2);
+                                       schedule_timeout_interruptible(2);
                                        if (signal_pending(current)) {
                                                break;
                                        }
index 278f841..b6e5cbf 100644
@@ -1920,8 +1920,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
        for (;;)
        {
                if (smi_result == SI_SM_CALL_WITH_DELAY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
                }
@@ -2256,10 +2255,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! new_smi->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!new_smi->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
  out_err:
        if (new_smi->intf)
@@ -2379,17 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! to_clean->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!to_clean->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
        /* Interrupts and timeouts are stopped, now make sure the
           interface is in a clean state. */
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
        }
 
        rv = ipmi_unregister_smi(to_clean->intf);
index e71aaae..2da64bf 100644
@@ -1037,10 +1037,8 @@ static __exit void ipmi_unregister_watchdog(void)
        /* Wait to make sure the message makes it out.  The lower layer has
           pointers to our buffers, we want to make sure they are done before
           we release our memory. */
-       while (atomic_read(&set_timeout_tofree)) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (atomic_read(&set_timeout_tofree))
+               schedule_timeout_uninterruptible(1);
 
        /* Disconnect from IPMI. */
        rv = ipmi_destroy_user(watchdog_user);
index cf01a72..b771611 100644
@@ -613,10 +613,15 @@ static struct miscdevice lcd_dev = {
 
 static int lcd_init(void)
 {
+       int ret;
        unsigned long data;
 
        pr_info("%s\n", LCD_DRIVER);
-       misc_register(&lcd_dev);
+       ret = misc_register(&lcd_dev);
+       if (ret) {
+               printk(KERN_WARNING LCD "Unable to register misc device.\n");
+               return ret;
+       }
 
        /* Check region? Naaah! Just snarf it up. */
 /*     request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/
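
Like the applicom change above, lcd_init() stops discarding misc_register()'s return value: registration can fail (for instance with -EBUSY when the requested minor is already taken), and an init routine that ignores the failure leaves a half-initialized driver behind. The idiom, sketched with a hypothetical device:

#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct miscdevice example_dev;   /* illustrative; fields omitted */

static int __init example_init(void)
{
        int ret = misc_register(&example_dev);

        if (ret) {
                printk(KERN_WARNING "example: cannot register misc device\n");
                return ret;     /* propagate, e.g. -EBUSY */
        }
        return 0;
}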
index 59eebe5..2afb903 100644
 #include <linux/console.h>
 #include <linux/device.h>
 #include <linux/wait.h>
+#include <linux/jiffies.h>
 
 #include <linux/parport.h>
 #undef LP_STATS
@@ -307,7 +308,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
                        (LP_F(minor) & LP_ABORT));
 
 #ifdef LP_STATS
-       if (jiffies-lp_table[minor].lastcall > LP_TIME(minor))
+       if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
                lp_table[minor].runchars = 0;
 
        lp_table[minor].lastcall = jiffies;
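
The LP_STATS check now uses time_after(), which compares two jiffies values through a signed subtraction and therefore stays correct across the counter wrap; it also reads as what it means, where open-coded jiffies arithmetic is easy to get subtly wrong. Sketch of the idiom:

#include <linux/jiffies.h>

/* True once 'timeout' ticks have elapsed since 'stamp', even if the
 * jiffies counter wrapped in between. */
static inline int window_expired(unsigned long stamp, unsigned long timeout)
{
        return time_after(jiffies, stamp + timeout);
}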
index d0ef1ae..45d012d 100644
@@ -1058,8 +1058,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
                 */
                timeout = jiffies + HZ;
                while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(5);
+                       schedule_timeout_interruptible(5);
                        if (time_after(jiffies, timeout))
                                break;
                }
@@ -1080,10 +1079,8 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
        info->event = 0;
        info->tty = NULL;
        if (info->blocked_open) {
-               if (info->close_delay) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(info->close_delay);
-               }
+               if (info->close_delay)
+                       schedule_timeout_interruptible(info->close_delay);
                wake_up_interruptible(&info->open_wait);
        }
 
@@ -1801,8 +1798,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
                printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
 #endif
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(char_time);
+               schedule_timeout_interruptible(char_time);
                if (signal_pending(current))
                        break;
                if (timeout && time_after(jiffies, orig_jiffies + timeout))
index 09103b3..c9bdf54 100644
@@ -62,7 +62,7 @@
 
 static inline unsigned char *alloc_buf(void)
 {
-       int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+       unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 
        if (PAGE_SIZE != N_TTY_BUF_SIZE)
                return kmalloc(N_TTY_BUF_SIZE, prio);
index 7a0c746..02d7f04 100644
@@ -1,7 +1,7 @@
 /*
  * linux/drivers/char/pcmcia/synclink_cs.c
  *
- * $Id: synclink_cs.c,v 4.26 2004/08/11 19:30:02 paulkf Exp $
+ * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $
  *
  * Device driver for Microgate SyncLink PC Card
  * multiprotocol serial adapter.
@@ -472,7 +472,7 @@ module_param_array(dosyncppp, int, NULL, 0);
 MODULE_LICENSE("GPL");
 
 static char *driver_name = "SyncLink PC Card driver";
-static char *driver_version = "$Revision: 4.26 $";
+static char *driver_version = "$Revision: 4.34 $";
 
 static struct tty_driver *serial_driver;
 
@@ -1457,6 +1457,8 @@ static int startup(MGSLPC_INFO * info)
 
        info->pending_bh = 0;
        
+       memset(&info->icount, 0, sizeof(info->icount));
+
        init_timer(&info->tx_timer);
        info->tx_timer.data = (unsigned long)info;
        info->tx_timer.function = tx_timeout;
@@ -1946,9 +1948,13 @@ static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
        int err;
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("get_params(%s)\n", info->device_name);
-       COPY_TO_USER(err,user_icount, &info->icount, sizeof(struct mgsl_icount));
-       if (err)
-               return -EFAULT;
+       if (!user_icount) {
+               memset(&info->icount, 0, sizeof(info->icount));
+       } else {
+               COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+               if (err)
+                       return -EFAULT;
+       }
        return 0;
 }
 
index f174aee..9e9cf14 100644
@@ -560,7 +560,7 @@ ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
 EXPORT_SYMBOL_GPL(__ide_abort);
 
 /**
- *     ide_abort       -       abort pending IDE operatins
+ *     ide_abort       -       abort pending IDE operations
  *     @drive: drive the error occurred on
  *     @msg: message to report
  *
@@ -623,7 +623,7 @@ static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
  *     @drive: drive the completion interrupt occurred on
  *
  *     drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
- *     We do any necessary daya reading and then wait for the drive to
+ *     We do any necessary data reading and then wait for the drive to
  *     go non busy. At that point we may read the error data and complete
  *     the request
  */
@@ -773,7 +773,7 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 
 /**
  *     execute_drive_command   -       issue special drive command
- *     @drive: the drive to issue th command on
+ *     @drive: the drive to issue the command on
  *     @rq: the request structure holding the command
  *
  *     execute_drive_cmd() issues a special drive command,  usually 
index 5a3dc46..ee38e6b 100644
@@ -2903,8 +2903,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
                } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
                             (tape->ascq == 1 || tape->ascq == 8)))
                        return -EIO;
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(HZ / 10);
+               msleep(100);
        }
        return -EIO;
 }
index c1196ce..2fcfac6 100644
@@ -27,6 +27,7 @@
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
+#include <linux/kernel.h>
 #include <linux/hdreg.h>
 
 #define XFER_PIO_5             0x0d
@@ -96,11 +97,9 @@ static struct ide_timing ide_timing[] = {
 #define IDE_TIMING_UDMA                0x80
 #define IDE_TIMING_ALL         0xff
 
-#define MIN(a,b)       ((a)<(b)?(a):(b))
-#define MAX(a,b)       ((a)>(b)?(a):(b))
-#define FIT(v,min,max) MAX(MIN(v,max),min)
-#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
-#define EZ(v,unit)     ((v)?ENOUGH(v,unit):0)
+#define FIT(v,vmin,vmax)       max_t(short,min_t(short,v,vmax),vmin)
+#define ENOUGH(v,unit)         (((v)-1)/(unit)+1)
+#define EZ(v,unit)             ((v)?ENOUGH(v,unit):0)
 
 #define XFER_MODE      0xf0
 #define XFER_UDMA_133  0x48
@@ -188,14 +187,14 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int
 
 static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what)
 {
-       if (what & IDE_TIMING_SETUP  ) m->setup   = MAX(a->setup,   b->setup);
-       if (what & IDE_TIMING_ACT8B  ) m->act8b   = MAX(a->act8b,   b->act8b);
-       if (what & IDE_TIMING_REC8B  ) m->rec8b   = MAX(a->rec8b,   b->rec8b);
-       if (what & IDE_TIMING_CYC8B  ) m->cyc8b   = MAX(a->cyc8b,   b->cyc8b);
-       if (what & IDE_TIMING_ACTIVE ) m->active  = MAX(a->active,  b->active);
-       if (what & IDE_TIMING_RECOVER) m->recover = MAX(a->recover, b->recover);
-       if (what & IDE_TIMING_CYCLE  ) m->cycle   = MAX(a->cycle,   b->cycle);
-       if (what & IDE_TIMING_UDMA   ) m->udma    = MAX(a->udma,    b->udma);
+       if (what & IDE_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+       if (what & IDE_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+       if (what & IDE_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+       if (what & IDE_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+       if (what & IDE_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+       if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+       if (what & IDE_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+       if (what & IDE_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
 }
 
 static struct ide_timing* ide_timing_find_mode(short speed)
index dc0841b..0ccf85f 100644
@@ -43,6 +43,7 @@
 #include <linux/ide.h>
 #include <linux/hdreg.h>
 #include <linux/major.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 #include <asm/system.h>
 
@@ -340,8 +341,7 @@ static void ide_config(dev_link_t *link)
                break;
            }
        }
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(HZ/10);
+       msleep(100);
     }
 
     if (hd < 0) {
index baf4bca..0afe442 100644
@@ -283,23 +283,19 @@ static void bsd_free (void *state)
                /*
                 * Release the dictionary
                 */
-               if (db->dict) {
-                       vfree (db->dict);
-                       db->dict = NULL;
-               }
+               vfree(db->dict);
+               db->dict = NULL;
 
                /*
                 * Release the string buffer
                 */
-               if (db->lens) {
-                       vfree (db->lens);
-                       db->lens = NULL;
-               }
+               vfree(db->lens);
+               db->lens = NULL;
 
                /*
                 * Finally release the structure itself.
                 */
-               kfree (db);
+               kfree(db);
        }
 }
 
index eebcb0b..8a7d54a 100644
@@ -1953,7 +1953,8 @@ isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding)
                kfree(d->rcvcount);
        if (!(d->rcvcount = kmalloc(sizeof(int) * m, GFP_ATOMIC))) {
                printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n");
-               if (!adding) kfree(d->rcverr);
+               if (!adding)
+                       kfree(d->rcverr);
                return -1;
        }
        memset((char *) d->rcvcount, 0, sizeof(int) * m);
index 17212b4..cc07bbe 100644
@@ -568,12 +568,9 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
 
       bad:
        dm_io_put(sectors_to_pages(chunk_size));
-       if (ps) {
-               if (ps->area)
-                       free_area(ps);
-
-               kfree(ps);
-       }
+       if (ps && ps->area)
+               free_area(ps);
+       kfree(ps);
        return r;
 }
 
index 1554b92..2897df9 100644
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pers_lock);
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwith if the IO
+ * the RAID driver will use the maximum available bandwidth if the IO
  * subsystem is idle. There is also an 'absolute maximum' reconstruction
  * speed limit - in case reconstruction slows down your system despite
  * idle IO detection.
@@ -3616,7 +3616,7 @@ static void md_do_sync(mddev_t *mddev)
        printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
        printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
                " %d KB/sec/disc.\n", sysctl_speed_limit_min);
-       printk(KERN_INFO "md: using maximum available idle IO bandwith "
+       printk(KERN_INFO "md: using maximum available idle IO bandwidth "
               "(but not more than %d KB/sec) for reconstruction.\n",
               sysctl_speed_limit_max);
 
index cd5828b..206cc2f 100644
@@ -168,10 +168,8 @@ void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt)
                return;
        pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
        pt->cpu = NULL;
-       if (NULL != pt->slist) {
-               kfree(pt->slist);
-               pt->slist = NULL;
-       }
+       kfree(pt->slist);
+       pt->slist = NULL;
 }
 
 int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt)
index cdda423..9774e94 100644
@@ -445,10 +445,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try)
                ucpia->sbuf[1].urb = NULL;
        }
 
-       if (ucpia->sbuf[1].data) {
-               kfree(ucpia->sbuf[1].data);
-               ucpia->sbuf[1].data = NULL;
-       }
+       kfree(ucpia->sbuf[1].data);
+       ucpia->sbuf[1].data = NULL;
  
        if (ucpia->sbuf[0].urb) {
                usb_kill_urb(ucpia->sbuf[0].urb);
@@ -456,10 +454,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try)
                ucpia->sbuf[0].urb = NULL;
        }
 
-       if (ucpia->sbuf[0].data) {
-               kfree(ucpia->sbuf[0].data);
-               ucpia->sbuf[0].data = NULL;
-       }
+       kfree(ucpia->sbuf[0].data);
+       ucpia->sbuf[0].data = NULL;
 }
 
 static int cpia_usb_close(void *privdata)
@@ -623,20 +619,14 @@ static void cpia_disconnect(struct usb_interface *intf)
 
        ucpia->curbuff = ucpia->workbuff = NULL;
 
-       if (ucpia->buffers[2]) {
-               vfree(ucpia->buffers[2]);
-               ucpia->buffers[2] = NULL;
-       }
+       vfree(ucpia->buffers[2]);
+       ucpia->buffers[2] = NULL;
 
-       if (ucpia->buffers[1]) {
-               vfree(ucpia->buffers[1]);
-               ucpia->buffers[1] = NULL;
-       }
+       vfree(ucpia->buffers[1]);
+       ucpia->buffers[1] = NULL;
 
-       if (ucpia->buffers[0]) {
-               vfree(ucpia->buffers[0]);
-               ucpia->buffers[0] = NULL;
-       }
+       vfree(ucpia->buffers[0]);
+       ucpia->buffers[0] = NULL;
 
        cam->lowlevel_data = NULL;
        kfree(ucpia);
index b577435..d4497db 100644
@@ -2184,30 +2184,18 @@ static void release_saa(void)
                vfree(saa->vidbuf);
                vfree(saa->audbuf);
                vfree(saa->osdbuf);
-               if (saa->dmavid2)
-                       kfree((void *) saa->dmavid2);
+               kfree(saa->dmavid2);
                saa->audbuf = saa->vidbuf = saa->osdbuf = NULL;
                saa->dmavid2 = NULL;
-               if (saa->dmadebi)
-                       kfree((void *) saa->dmadebi);
-               if (saa->dmavid1)
-                       kfree((void *) saa->dmavid1);
-               if (saa->dmavid2)
-                       kfree((void *) saa->dmavid2);
-               if (saa->dmavid3)
-                       kfree((void *) saa->dmavid3);
-               if (saa->dmaa1in)
-                       kfree((void *) saa->dmaa1in);
-               if (saa->dmaa1out)
-                       kfree((void *) saa->dmaa1out);
-               if (saa->dmaa2in)
-                       kfree((void *) saa->dmaa2in);
-               if (saa->dmaa2out)
-                       kfree((void *) saa->dmaa2out);
-               if (saa->dmaRPS1)
-                       kfree((void *) saa->dmaRPS1);
-               if (saa->dmaRPS2)
-                       kfree((void *) saa->dmaRPS2);
+               kfree(saa->dmadebi);
+               kfree(saa->dmavid1);
+               kfree(saa->dmavid3);
+               kfree(saa->dmaa1in);
+               kfree(saa->dmaa1out);
+               kfree(saa->dmaa2in);
+               kfree(saa->dmaa2out);
+               kfree(saa->dmaRPS1);
+               kfree(saa->dmaRPS2);
                free_irq(saa->irq, saa);
                if (saa->saa7146_mem)
                        iounmap(saa->saa7146_mem);
index 97354f2..574b8e3 100644
@@ -267,10 +267,10 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
                kfree(dma->pages);
                dma->pages = NULL;
        }
-       if (dma->vmalloc) {
-               vfree(dma->vmalloc);
-               dma->vmalloc = NULL;
-       }
+
+       vfree(dma->vmalloc);
+       dma->vmalloc = NULL;
+
        if (dma->bus_addr) {
                dma->bus_addr = 0;
        }
index ba838a4..53adeb7 100644
@@ -650,7 +650,7 @@ jpg_fbuffer_free (struct file *file)
                                     off += PAGE_SIZE)
                                        ClearPageReserved(MAP_NR
                                                          (mem + off));
-                               kfree((void *) mem);
+                               kfree(mem);
                                fh->jpg_buffers.buffer[i].frag_tab[0] = 0;
                                fh->jpg_buffers.buffer[i].frag_tab[1] = 0;
                        }
index c335331..0728681 100644
@@ -820,11 +820,9 @@ void zoran_close(struct video_device* dev)
         msleep(100);                   /* Wait 1/10th of a second */
 
        /* free the allocated framebuffer */
-       if (ztv->fbuffer)
-               bfree( ztv->fbuffer, ZORAN_MAX_FBUFSIZE );
+       bfree(ztv->fbuffer, ZORAN_MAX_FBUFSIZE);
        ztv->fbuffer = 0;
-       if (ztv->overinfo.overlay)
-               kfree( ztv->overinfo.overlay );
+       kfree(ztv->overinfo.overlay);
        ztv->overinfo.overlay = 0;
 
 }
index 7501fab..46de5c9 100644
@@ -192,22 +192,37 @@ static int hdpu_cpustate_probe(struct device *ddev)
 {
        struct platform_device *pdev = to_platform_device(ddev);
        struct resource *res;
+       struct proc_dir_entry *proc_de;
+       int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cpustate.set_addr = (unsigned long *)res->start;
        cpustate.clr_addr = (unsigned long *)res->end - 1;
 
-       misc_register(&cpustate_dev);
-       create_proc_read_entry("sky_cpustate", 0, 0, cpustate_read_proc, NULL);
+       ret = misc_register(&cpustate_dev);
+       if (ret) {
+               printk(KERN_WARNING "sky_cpustate: Unable to register misc "
+                                       "device.\n");
+               cpustate.set_addr = NULL;
+               cpustate.clr_addr = NULL;
+               return ret;
+       }
+
+       proc_de = create_proc_read_entry("sky_cpustate", 0, 0,
+                                       cpustate_read_proc, NULL);
+       if (proc_de == NULL)
+               printk(KERN_WARNING "sky_cpustate: Unable to create proc "
+                                       "dir entry\n");
 
        printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
        return 0;
 }
+
 static int hdpu_cpustate_remove(struct device *ddev)
 {
 
-       cpustate.set_addr = 0;
-       cpustate.clr_addr = 0;
+       cpustate.set_addr = NULL;
+       cpustate.clr_addr = NULL;
 
        remove_proc_entry("sky_cpustate", NULL);
        misc_deregister(&cpustate_dev);
index bb713fe..1443117 100644
@@ -91,8 +91,7 @@ static void __exit cleanup_mtdram(void)
 {
        if (mtd_info) {
                del_mtd_device(mtd_info);
-               if (mtd_info->priv)
-                       vfree(mtd_info->priv);
+               vfree(mtd_info->priv);
                kfree(mtd_info);
        }
 }
index d9ab60b..d32c1b3 100644
@@ -1017,27 +1017,16 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
 
 void ftl_freepart(partition_t *part)
 {
-    if (part->VirtualBlockMap) {
        vfree(part->VirtualBlockMap);
        part->VirtualBlockMap = NULL;
-    }
-    if (part->VirtualPageMap) {
        kfree(part->VirtualPageMap);
        part->VirtualPageMap = NULL;
-    }
-    if (part->EUNInfo) {
        kfree(part->EUNInfo);
        part->EUNInfo = NULL;
-    }
-    if (part->XferInfo) {
        kfree(part->XferInfo);
        part->XferInfo = NULL;
-    }
-    if (part->bam_cache) {
        kfree(part->bam_cache);
        part->bam_cache = NULL;
-    }
-    
 } /* ftl_freepart */
 
 static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
index 3d88ad6..fb4098e 100644
@@ -323,33 +323,27 @@ static void bsd_reset (void *state)
  */
 
 static void bsd_free (void *state)
-  {
-    struct bsd_db *db = (struct bsd_db *) state;
+{
+       struct bsd_db *db = state;
     
-    if (db)
-      {
+       if (!db)
+               return;
+
 /*
  * Release the dictionary
  */
-       if (db->dict)
-         {
-           vfree (db->dict);
-           db->dict = NULL;
-         }
+       vfree(db->dict);
+       db->dict = NULL;
 /*
  * Release the string buffer
  */
-       if (db->lens)
-         {
-           vfree (db->lens);
-           db->lens = NULL;
-         }
+       vfree(db->lens);
+       db->lens = NULL;
 /*
  * Finally release the structure itself.
  */
-       kfree (db);
-      }
-  }
+       kfree(db);
+}
 
 /*
  * Allocate space for a (de) compressor.
index bb71638..0df7e92 100644
@@ -1232,9 +1232,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
        navail = 0;     /* total # of usable channels (not deregistered) */
        hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
        i = 0;
-       list = &ppp->channels;
-       while ((list = list->next) != &ppp->channels) {
-               pch = list_entry(list, struct channel, clist);
+       list_for_each_entry(pch, &ppp->channels, clist) {
                navail += pch->avail = (pch->chan != NULL);
                if (pch->avail) {
                        if (skb_queue_empty(&pch->file.xq) ||
@@ -1280,6 +1278,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
        /* skip to the channel after the one we last used
           and start at that one */
+       list = &ppp->channels;
        for (i = 0; i < ppp->nxchan; ++i) {
                list = list->next;
                if (list == &ppp->channels) {
@@ -1730,7 +1729,7 @@ static void
 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 {
        u32 mask, seq;
-       struct list_head *l;
+       struct channel *ch;
        int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 
        if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
@@ -1784,8 +1783,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
         * The list of channels can't change because we have the receive
         * side of the ppp unit locked.
         */
-       for (l = ppp->channels.next; l != &ppp->channels; l = l->next) {
-               struct channel *ch = list_entry(l, struct channel, clist);
+       list_for_each_entry(ch, &ppp->channels, clist) {
                if (seq_before(ch->lastseq, seq))
                        seq = ch->lastseq;
        }
@@ -2271,10 +2269,8 @@ static struct compressor_entry *
 find_comp_entry(int proto)
 {
        struct compressor_entry *ce;
-       struct list_head *list = &compressor_list;
 
-       while ((list = list->next) != &compressor_list) {
-               ce = list_entry(list, struct compressor_entry, list);
+       list_for_each_entry(ce, &compressor_list, list) {
                if (ce->comp->compress_proto == proto)
                        return ce;
        }
@@ -2540,20 +2536,15 @@ static struct channel *
 ppp_find_channel(int unit)
 {
        struct channel *pch;
-       struct list_head *list;
 
-       list = &new_channels;
-       while ((list = list->next) != &new_channels) {
-               pch = list_entry(list, struct channel, list);
+       list_for_each_entry(pch, &new_channels, list) {
                if (pch->file.index == unit) {
                        list_del(&pch->list);
                        list_add(&pch->list, &all_channels);
                        return pch;
                }
        }
-       list = &all_channels;
-       while ((list = list->next) != &all_channels) {
-               pch = list_entry(list, struct channel, list);
+       list_for_each_entry(pch, &all_channels, list) {
                if (pch->file.index == unit)
                        return pch;
        }
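
The ppp_generic conversions replace hand-rolled walks (list = list->next; ... list_entry(...)) with list_for_each_entry(), whose cursor is the containing structure itself rather than the list node. A self-contained sketch reusing the channel/clist names from the hunk, with the struct trimmed to what the walk needs:

#include <linux/list.h>

struct channel {
        struct list_head clist;         /* link in the unit's channel list */
        int avail;
};

static int count_available(struct list_head *channels)
{
        struct channel *pch;
        int n = 0;

        list_for_each_entry(pch, channels, clist)
                n += pch->avail;        /* pch is the entry, not the node */
        return n;
}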
index 93800c1..ee48bfd 100644
@@ -2144,9 +2144,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
     u_long iobase = 0;                     /* Clear upper 32 bits in Alphas */
     int i, j, cfrv;
     struct de4x5_private *lp = netdev_priv(dev);
-    struct list_head *walk = &pdev->bus_list;
+    struct list_head *walk;
 
-    for (walk = walk->next; walk != &pdev->bus_list; walk = walk->next) {
+    list_for_each(walk, &pdev->bus_list) {
        struct pci_dev *this_dev = pci_dev_b(walk);
 
        /* Skip the pci_bus list entry */
index 7318550..cb84a4e 100644
@@ -20,7 +20,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/slab.h>
 #include <linux/types.h>
 
 #include <asm/io.h>
index 694bae1..5b887ba 100644
@@ -196,7 +196,7 @@ int parport_wait_peripheral(struct parport *port,
                return 1;
 
        /* 40ms of slow polling. */
-       deadline = jiffies + (HZ + 24) / 25;
+       deadline = jiffies + msecs_to_jiffies(40);
        while (time_before (jiffies, deadline)) {
                int ret;
 
@@ -205,7 +205,7 @@ int parport_wait_peripheral(struct parport *port,
 
                /* Wait for 10ms (or until an interrupt occurs if
                 * the handler is set) */
-               if ((ret = parport_wait_event (port, (HZ + 99) / 100)) < 0)
+               if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
                        return ret;
 
                status = parport_read_status (port);
@@ -216,8 +216,7 @@ int parport_wait_peripheral(struct parport *port,
                        /* parport_wait_event didn't time out, but the
                         * peripheral wasn't actually ready either.
                         * Wait for another 10ms. */
-                       __set_current_state (TASK_INTERRUPTIBLE);
-                       schedule_timeout ((HZ+ 99) / 100);
+                       schedule_timeout_interruptible(msecs_to_jiffies(10));
                }
        }
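
Here and in the two parport files that follow, the jiffy counts are now derived with msecs_to_jiffies(): expressions like (HZ + 99) / 100 do compute "10 ms rounded up to a whole jiffy", but only for a reader who stops to do the arithmetic. Sketch of the two spellings side by side:

#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_poll_delays(void)
{
        long ten_ms   = msecs_to_jiffies(10);   /* was (HZ + 99) / 100 */
        long forty_ms = msecs_to_jiffies(40);   /* was (HZ + 24) / 25 */

        schedule_timeout_interruptible(ten_ms);
        schedule_timeout_interruptible(forty_ms);
}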
 
index 6624278..ce1e2aa 100644
@@ -60,7 +60,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
        parport_data_forward (port);
        while (count < len) {
                unsigned long expire = jiffies + dev->timeout;
-               long wait = (HZ + 99) / 100;
+               long wait = msecs_to_jiffies(10);
                unsigned char mask = (PARPORT_STATUS_ERROR
                                      | PARPORT_STATUS_BUSY);
                unsigned char val = (PARPORT_STATUS_ERROR
@@ -97,8 +97,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
                            our interrupt handler called. */
                        if (count && no_irq) {
                                parport_release (dev);
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout (wait);
+                               schedule_timeout_interruptible(wait);
                                parport_claim_or_block (dev);
                        }
                        else
@@ -542,13 +541,12 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
                        /* Yield the port for a while. */
                        if (count && dev->port->irq != PARPORT_IRQ_NONE) {
                                parport_release (dev);
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout ((HZ + 24) / 25);
+                               schedule_timeout_interruptible(msecs_to_jiffies(40));
                                parport_claim_or_block (dev);
                        }
                        else
                                /* We must have the device claimed here. */
-                               parport_wait_event (port, (HZ + 24) / 25);
+                               parport_wait_event (port, msecs_to_jiffies(40));
 
                        /* Is there a signal pending? */
                        if (signal_pending (current))
index 1b938bb..c6493ad 100644
@@ -173,8 +173,7 @@ static int change_mode(struct parport *p, int m)
                                if (time_after_eq (jiffies, expire))
                                        /* The FIFO is stuck. */
                                        return -EBUSY;
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout ((HZ + 99) / 100);
+                               schedule_timeout_interruptible(msecs_to_jiffies(10));
                                if (signal_pending (current))
                                        break;
                        }
index cc9d653..56a3b39 100644
@@ -44,10 +44,14 @@ pci_config_attr(subsystem_device, "0x%04x\n");
 pci_config_attr(class, "0x%06x\n");
 pci_config_attr(irq, "%u\n");
 
-static ssize_t local_cpus_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t local_cpus_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {              
-       cpumask_t mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-       int len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+       cpumask_t mask;
+       int len;
+
+       mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
+       len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
        strcat(buf,"\n"); 
        return 1+len;
 }
index 35caec1..26a55d0 100644
@@ -72,11 +72,13 @@ void pci_remove_legacy_files(struct pci_bus *bus) { return; }
 /*
  * PCI Bus Class Devices
  */
-static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev, char *buf)
+static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev,
+                                       char *buf)
 {
-       cpumask_t cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
        int ret;
+       cpumask_t cpumask;
 
+       cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
        ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask);
        if (ret < PAGE_SIZE)
                buf[ret++] = '\n';
index d44205d..d89f83f 100644
@@ -5,6 +5,7 @@
  */
 
 #define __KERNEL_SYSCALLS__
+static int errno;
 
 #include <linux/kernel.h>
 #include <linux/kthread.h>
@@ -13,8 +14,6 @@
 #include <linux/delay.h>
 #include <asm/oplib.h>
 #include <asm/ebus.h>
-static int errno;
-#include <asm/unistd.h>
 
 #include "bbc_i2c.h"
 #include "max1617.h"
index d765cc1..b0cc3c2 100644
@@ -20,6 +20,7 @@
  */
 
 #define __KERNEL_SYSCALLS__
+static int errno;
 
 #include <linux/config.h>
 #include <linux/module.h>
@@ -38,9 +39,6 @@
 #include <asm/uaccess.h>
 #include <asm/envctrl.h>
 
-static int errno;
-#include <asm/unistd.h>
-
 #define ENVCTRL_MINOR  162
 
 #define PCF8584_ADDRESS        0x55
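
Both sparc drivers fix the same ordering problem: with __KERNEL_SYSCALLS__ defined, the _syscallN() stubs that <asm/unistd.h> expands assign to a file-local errno, and that header is already pulled in indirectly by the other includes. The variable therefore has to be declared before the include block, not after it. The shape of the fix, sketched:

#define __KERNEL_SYSCALLS__
static int errno;		/* referenced by the expanded syscall stubs */

#include <linux/unistd.h>	/* drags in <asm/unistd.h>; no direct include needed */
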
index 2341d27..7a33c70 100644 (file)
@@ -6090,8 +6090,8 @@ NCR53c7x0_release(struct Scsi_Host *host) {
     if (hostdata->num_cmds)
        printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
            host->host_no, hostdata->num_cmds);
-    if (hostdata->events) 
-       vfree ((void *)hostdata->events);
+
+    vfree(hostdata->events);
 
     /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
      * XXX may be invalid (CONFIG_060_WRITETHROUGH)
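
The pattern behind this and the later kfree()/vfree() hunks: both frees accept NULL and return immediately, so the guard is dead weight:

	if (hostdata->events)		/* redundant test ... */
		vfree(hostdata->events);

	vfree(hostdata->events);	/* ... vfree(NULL) is a no-op */
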
index 13ecd0c..da6e51c 100644 (file)
@@ -560,7 +560,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
        return result;
 }
 
-static int ch_gstatus(scsi_changer *ch, int type, unsigned char *dest)
+static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
 {
        int retval = 0;
        u_char data[16];
@@ -634,6 +634,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
 {
        scsi_changer *ch = file->private_data;
        int retval;
+       void __user *argp = (void __user *)arg;
        
        switch (cmd) {
        case CHIOGPARAMS:
@@ -646,7 +647,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                params.cp_nportals  = ch->counts[CHET_IE];
                params.cp_ndrives   = ch->counts[CHET_DT];
                
-               if (copy_to_user((void *) arg, &params, sizeof(params)))
+               if (copy_to_user(argp, &params, sizeof(params)))
                        return -EFAULT;
                return 0;
        }
@@ -671,7 +672,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                        vparams.cvp_n4  = ch->counts[CHET_V4];
                        strncpy(vparams.cvp_label4,vendor_labels[3],16);
                }
-               if (copy_to_user((void *) arg, &vparams, sizeof(vparams)))
+               if (copy_to_user(argp, &vparams, sizeof(vparams)))
                        return -EFAULT;
                return 0;
        }
@@ -680,7 +681,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_position pos;
                
-               if (copy_from_user(&pos, (void*)arg, sizeof (pos)))
+               if (copy_from_user(&pos, argp, sizeof (pos)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
@@ -699,7 +700,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_move mv;
 
-               if (copy_from_user(&mv, (void*)arg, sizeof (mv)))
+               if (copy_from_user(&mv, argp, sizeof (mv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
@@ -721,7 +722,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_exchange mv;
                
-               if (copy_from_user(&mv, (void*)arg, sizeof (mv)))
+               if (copy_from_user(&mv, argp, sizeof (mv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, mv.ce_srctype,  mv.ce_srcunit ) ||
@@ -746,7 +747,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_element_status ces;
                
-               if (copy_from_user(&ces, (void*)arg, sizeof (ces)))
+               if (copy_from_user(&ces, argp, sizeof (ces)))
                        return -EFAULT;
                if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
                        return -EINVAL;
@@ -762,7 +763,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                unsigned int elem;
                int     result,i;
                
-               if (copy_from_user(&cge, (void*)arg, sizeof (cge)))
+               if (copy_from_user(&cge, argp, sizeof (cge)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
@@ -825,7 +826,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                kfree(buffer);
                up(&ch->lock);
                
-               if (copy_to_user((void*)arg, &cge, sizeof (cge)))
+               if (copy_to_user(argp, &cge, sizeof (cge)))
                        return -EFAULT;
                return result;
        }
@@ -843,7 +844,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                struct changer_set_voltag csv;
                int elem;
 
-               if (copy_from_user(&csv, (void*)arg, sizeof(csv)))
+               if (copy_from_user(&csv, argp, sizeof(csv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
@@ -861,7 +862,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        }
 
        default:
-               return scsi_ioctl(ch->device, cmd, (void*)arg);
+               return scsi_ioctl(ch->device, cmd, argp);
 
        }
 }
@@ -894,9 +895,9 @@ static long ch_ioctl_compat(struct file * file,
        case CHIOGSTATUS32:
        {
                struct changer_element_status32 ces32;
-               unsigned char *data;
+               unsigned char __user *data;
                
-               if (copy_from_user(&ces32, (void*)arg, sizeof (ces32)))
+               if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
                        return -EFAULT;
                if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
                        return -EINVAL;
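
The ch driver hunks are a straight sparse-annotation conversion: casting arg to void __user * once and passing argp everywhere lets sparse (make C=1) verify that user pointers only ever reach copy_from_user()/copy_to_user() and friends. A trimmed sketch of the pattern (struct name reused from the driver above):

static int example_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct changer_position pos;

	if (copy_from_user(&pos, argp, sizeof(pos)))
		return -EFAULT;		/* sparse knows argp is __user */
	/* ... */
	return 0;
}
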
index d72be0c..3fda8d4 100644 (file)
@@ -691,8 +691,7 @@ int cpqfcTS_ioctl( struct scsi_device *ScsiDev, int Cmnd, void *arg)
         if(  copy_to_user( vendor_cmd->bufp, buf, vendor_cmd->len))
                result = -EFAULT;
 
-        if( buf) 
-         kfree( buf);
+       kfree(buf);
 
         return result;
       }
index b5dc353..6e54c7d 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
 #include <linux/mca.h>
-#include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/mca-legacy.h>
index 89a4a06..3f2f246 100644 (file)
@@ -1377,7 +1377,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
        
                if ((STp->buffer)->syscall_result || !SRpnt) {
                        printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
-                       vfree((void *)buffer);
+                       vfree(buffer);
                        *aSRpnt = SRpnt;
                        return (-EIO);
                }
@@ -1419,7 +1419,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
 
                        if (new_frame > frame + 1000) {
                                printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name);
-                               vfree((void *)buffer);
+                               vfree(buffer);
                                return (-EIO);
                        }
                        if ( i >= nframes + pending ) break;
@@ -1500,7 +1500,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
                             SRpnt->sr_sense_buffer[12]         ==  0 &&
                             SRpnt->sr_sense_buffer[13]         ==  2) {
                                printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name);
-                               vfree((void *)buffer);
+                               vfree(buffer);
                                return (-EIO);                  /* hit end of tape = fail */
                        }
                        i = ((SRpnt->sr_sense_buffer[3] << 24) |
@@ -1525,7 +1525,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
        }
        if (!pending)
                osst_copy_to_buffer(STp->buffer, p);    /* so buffer content == at entry in all cases */
-       vfree((void *)buffer);
+       vfree(buffer);
        return 0;
 }
 
@@ -5852,7 +5852,7 @@ static int osst_remove(struct device *dev)
                        os_scsi_tapes[i] = NULL;
                        osst_nr_dev--;
                        write_unlock(&os_scsi_tapes_lock);
-                       if (tpnt->header_cache != NULL) vfree(tpnt->header_cache);
+                       vfree(tpnt->header_cache);
                        if (tpnt->buffer) {
                                normalize_buffer(tpnt->buffer);
                                kfree(tpnt->buffer);
@@ -5896,8 +5896,7 @@ static void __exit exit_osst (void)
                for (i=0; i < osst_max_dev; ++i) {
                        if (!(STp = os_scsi_tapes[i])) continue;
                        /* This is defensive, supposed to happen during detach */
-                       if (STp->header_cache)
-                               vfree(STp->header_cache);
+                       vfree(STp->header_cache);
                        if (STp->buffer) {
                                normalize_buffer(STp->buffer);
                                kfree(STp->buffer);
index 5b65e20..4d75cdf 100644 (file)
@@ -864,7 +864,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
        /*
         * We're pretty sure there's a port here.  Lets find out what
         * type of port it is.  The IIR top two bits allows us to find
-        * out if its 8250 or 16450, 16550, 16550A or later.  This
+        * out if it's 8250 or 16450, 16550, 16550A or later.  This
         * determines what we test for next.
         *
         * We also initialise the EFR (if any) to zero for later.  The
index f2c9fa4..f670468 100644 (file)
@@ -774,10 +774,7 @@ static int ixj_wink(IXJ *j)
        j->pots_winkstart = jiffies;
        SLIC_SetState(PLD_SLIC_STATE_OC, j);
 
-       while (time_before(jiffies, j->pots_winkstart + j->winktime)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       msleep(jiffies_to_msecs(j->winktime));
 
        SLIC_SetState(slicnow, j);
        return 0;
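
Unlike schedule_timeout(), msleep() takes milliseconds and sleeps uninterruptibly, which is why the conversions below differ in shape: fixed jiffy delays become millisecond literals, while a delay stored in jiffies is converted first.

	/* hertz / 50 jiffies == 20 ms, independent of HZ: */
	msleep(20);
	/* a jiffies-denominated delay must be converted explicitly: */
	msleep(jiffies_to_msecs(j->winktime));
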
@@ -1912,7 +1909,6 @@ static int ixj_pcmcia_cable_check(IXJ *j)
 
 static int ixj_hookstate(IXJ *j)
 {
-       unsigned long det;
        int fOffHook = 0;
 
        switch (j->cardtype) {
@@ -1943,11 +1939,7 @@ static int ixj_hookstate(IXJ *j)
                            j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) {
                                if (j->flags.ringing || j->flags.cringing) {
                                        if (!in_interrupt()) {
-                                               det = jiffies + (hertz / 50);
-                                               while (time_before(jiffies, det)) {
-                                                       set_current_state(TASK_INTERRUPTIBLE);
-                                                       schedule_timeout(1);
-                                               }
+                                               msleep(20);
                                        }
                                        SLIC_GetState(j);
                                        if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) {
@@ -2062,7 +2054,7 @@ static void ixj_ring_start(IXJ *j)
 static int ixj_ring(IXJ *j)
 {
        char cntr;
-       unsigned long jif, det;
+       unsigned long jif;
 
        j->flags.ringing = 1;
        if (ixj_hookstate(j) & 1) {
@@ -2070,7 +2062,6 @@ static int ixj_ring(IXJ *j)
                j->flags.ringing = 0;
                return 1;
        }
-       det = 0;
        for (cntr = 0; cntr < j->maxrings; cntr++) {
                jif = jiffies + (1 * hertz);
                ixj_ring_on(j);
@@ -2080,8 +2071,7 @@ static int ixj_ring(IXJ *j)
                                j->flags.ringing = 0;
                                return 1;
                        }
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                        if (signal_pending(current))
                                break;
                }
@@ -2089,20 +2079,13 @@ static int ixj_ring(IXJ *j)
                ixj_ring_off(j);
                while (time_before(jiffies, jif)) {
                        if (ixj_hookstate(j) & 1) {
-                               det = jiffies + (hertz / 100);
-                               while (time_before(jiffies, det)) {
-                                       set_current_state(TASK_INTERRUPTIBLE);
-                                       schedule_timeout(1);
-                                       if (signal_pending(current))
-                                               break;
-                               }
+                               msleep(10);
                                if (ixj_hookstate(j) & 1) {
                                        j->flags.ringing = 0;
                                        return 1;
                                }
                        }
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                        if (signal_pending(current))
                                break;
                }
@@ -2168,10 +2151,8 @@ static int ixj_release(struct inode *inode, struct file *file_p)
         *    Set up locks to ensure that only one process is talking to the DSP at a time.
         *    This is necessary to keep the DSP from locking up.
         */
-       while(test_and_set_bit(board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if (ixjdebug & 0x0002)
                printk(KERN_INFO "Closing board %d\n", NUM(inode));
 
@@ -3301,14 +3282,10 @@ static void ixj_write_cidcw(IXJ *j)
        ixj_play_tone(j, 23);
 
        clear_bit(j->board, &j->busyflags);
-       while(j->tone_state) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(j->tone_state)
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if(ixjdebug & 0x0200) {
                printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies);
        }
@@ -3328,14 +3305,10 @@ static void ixj_write_cidcw(IXJ *j)
        ixj_play_tone(j, 24);
 
        clear_bit(j->board, &j->busyflags);
-       while(j->tone_state) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(j->tone_state)
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if(ixjdebug & 0x0200) {
                printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies);
        }
@@ -3343,14 +3316,10 @@ static void ixj_write_cidcw(IXJ *j)
        j->cidcw_wait = jiffies + ((50 * hertz) / 100);
 
        clear_bit(j->board, &j->busyflags);
-       while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait))
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        j->cidcw_wait = 0;
        if(!j->flags.cidcw_ack) {
                if(ixjdebug & 0x0200) {
@@ -6125,10 +6094,8 @@ static int ixj_ioctl(struct inode *inode, struct file *file_p, unsigned int cmd,
         *    Set up locks to ensure that only one process is talking to the DSP at a time.
         *    This is necessary to keep the DSP from locking up.
         */
-       while(test_and_set_bit(board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if (ixjdebug & 0x0040)
                printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
        if (minor >= IXJMAX) {
@@ -6694,8 +6661,6 @@ static struct file_operations ixj_fops =
 
 static int ixj_linetest(IXJ *j)
 {
-       unsigned long jifwait;
-
        j->flags.pstncheck = 1; /* Testing */
        j->flags.pstn_present = 0; /* Assume the line is not there */
 
@@ -6726,11 +6691,7 @@ static int ixj_linetest(IXJ *j)
 
                outb_p(j->pld_scrw.byte, j->XILINXbase);
                daa_set_mode(j, SOP_PU_CONVERSATION);
-               jifwait = jiffies + hertz;
-               while (time_before(jiffies, jifwait)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               msleep(1000);
                daa_int_read(j);
                daa_set_mode(j, SOP_PU_RESET);
                if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
@@ -6750,11 +6711,7 @@ static int ixj_linetest(IXJ *j)
        j->pld_slicw.bits.rly3 = 0;
        outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
        daa_set_mode(j, SOP_PU_CONVERSATION);
-       jifwait = jiffies + hertz;
-       while (time_before(jiffies, jifwait)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       msleep(1000);
        daa_int_read(j);
        daa_set_mode(j, SOP_PU_RESET);
        if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
@@ -6783,7 +6740,6 @@ static int ixj_linetest(IXJ *j)
 static int ixj_selfprobe(IXJ *j)
 {
        unsigned short cmd;
-       unsigned long jif;
        int cnt;
        BYTES bytes;
 
@@ -6933,29 +6889,13 @@ static int ixj_selfprobe(IXJ *j)
        } else {
                if (j->cardtype == QTI_LINEJACK) {
                        LED_SetState(0x1, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x2, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x4, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x8, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x0, j);
                        daa_get_version(j);
                        if (ixjdebug & 0x0002)
index 7398a7f..0fd0fa9 100644 (file)
@@ -260,7 +260,7 @@ static int stv_stop_video (struct usb_stv *dev)
                        PDEBUG (0, "STV(i): Camera set to original resolution");
        }
        /* origMode */
-       kfree (buf);
+       kfree(buf);
        return i;
 }
 
@@ -276,7 +276,7 @@ static int stv_set_video_mode (struct usb_stv *dev)
        }
 
        if ((i = stv_set_config (dev, 1, 0, 0)) < 0) {
-               kfree (buf);
+               kfree(buf);
                return i;
        }
 
@@ -301,13 +301,13 @@ static int stv_set_video_mode (struct usb_stv *dev)
        goto exit;
 
 error:
-       kfree (buf);
+       kfree(buf);
        if (stop_video == 1)
                stv_stop_video (dev);
        return -1;
 
 exit:
-       kfree (buf);
+       kfree(buf);
        return 0;
 }
 
@@ -327,7 +327,7 @@ static int stv_init (struct usb_stv *stv680)
 
        /* set config 1, interface 0, alternate 0 */
        if ((i = stv_set_config (stv680, 1, 0, 0)) < 0) {
-               kfree (buffer);
+               kfree(buffer);
                PDEBUG (0, "STV(e): set config 1,0,0 failed");
                return -1;
        }
@@ -435,11 +435,11 @@ static int stv_init (struct usb_stv *stv680)
 error:
        i = stv_sndctrl (0, stv680, 0x80, 0, buffer, 0x02);     /* Get Last Error */
        PDEBUG (1, "STV(i): last error: %i,  command = 0x%x", buffer[0], buffer[1]);
-       kfree (buffer);
+       kfree(buffer);
        return -1;
 
 exit:
-       kfree (buffer);
+       kfree(buffer);
 
        /* video = 320x240, 352x288 */
        if (stv680->CIF == 1) {
@@ -708,10 +708,10 @@ static int stv680_stop_stream (struct usb_stv *stv680)
                        usb_kill_urb (stv680->urb[i]);
                        usb_free_urb (stv680->urb[i]);
                        stv680->urb[i] = NULL;
-                       kfree (stv680->sbuf[i].data);
+                       kfree(stv680->sbuf[i].data);
                }
        for (i = 0; i < STV680_NUMSCRATCH; i++) {
-               kfree (stv680->scratch[i].data);
+               kfree(stv680->scratch[i].data);
                stv680->scratch[i].data = NULL;
        }
 
@@ -1068,7 +1068,7 @@ static int stv_close (struct inode *inode, struct file *file)
        stv680->user = 0;
 
        if (stv680->removed) {
-               kfree (stv680);
+               kfree(stv680);
                stv680 = NULL;
                PDEBUG (0, "STV(i): device unregistered");
        }
@@ -1445,14 +1445,14 @@ static inline void usb_stv680_remove_disconnected (struct usb_stv *stv680)
                        usb_kill_urb (stv680->urb[i]);
                        usb_free_urb (stv680->urb[i]);
                        stv680->urb[i] = NULL;
-                       kfree (stv680->sbuf[i].data);
+                       kfree(stv680->sbuf[i].data);
                }
        for (i = 0; i < STV680_NUMSCRATCH; i++)
-               kfree (stv680->scratch[i].data);
+               kfree(stv680->scratch[i].data);
        PDEBUG (0, "STV(i): %s disconnected", stv680->camera_name);
 
        /* Free the memory */
-       kfree (stv680);
+       kfree(stv680);
 }
 
 static void stv680_disconnect (struct usb_interface *intf)
index 0ea62d8..ca92940 100644 (file)
@@ -342,16 +342,11 @@ static void vga_cleanup(struct vgastate *state)
        if (state->vidstate != NULL) {
                struct regstate *saved = (struct regstate *) state->vidstate;
 
-               if (saved->vga_font0) 
-                       vfree(saved->vga_font0);
-               if (saved->vga_font1) 
-                       vfree(saved->vga_font1);
-               if (saved->vga_text)
-                       vfree(saved->vga_text);
-               if (saved->vga_cmap)
-                       vfree(saved->vga_cmap);
-               if (saved->attr)
-                       vfree(saved->attr);
+               vfree(saved->vga_font0);
+               vfree(saved->vga_font1);
+               vfree(saved->vga_text);
+               vfree(saved->vga_cmap);
+               vfree(saved->attr);
                kfree(saved);
                state->vidstate = NULL;
        }
index 1c62203..6cbfcea 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
index 3217ac5..2335f14 100644 (file)
@@ -3215,10 +3215,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        }
        
        cifs_sb->tcon = NULL;
-       if (ses) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ / 2);
-       }
+       if (ses)
+               schedule_timeout_interruptible(msecs_to_jiffies(500));
        if (ses)
                sesInfoFree(ses);
 
index 5034365..8def89f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/vmalloc.h>
 #include <linux/zlib.h>
+#include <linux/cramfs_fs.h>
 
 static z_stream stream;
 static int initialized;
index a15a2e1..7376b61 100644 (file)
@@ -337,12 +337,10 @@ struct dentry * d_find_alias(struct inode *inode)
  */
 void d_prune_aliases(struct inode *inode)
 {
-       struct list_head *tmp, *head = &inode->i_dentry;
+       struct dentry *dentry;
 restart:
        spin_lock(&dcache_lock);
-       tmp = head;
-       while ((tmp = tmp->next) != head) {
-               struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
+       list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                spin_lock(&dentry->d_lock);
                if (!atomic_read(&dentry->d_count)) {
                        __dget_locked(dentry);
@@ -463,10 +461,7 @@ void shrink_dcache_sb(struct super_block * sb)
         * superblock to the most recent end of the unused list.
         */
        spin_lock(&dcache_lock);
-       next = dentry_unused.next;
-       while (next != &dentry_unused) {
-               tmp = next;
-               next = tmp->next;
+       list_for_each_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
@@ -478,10 +473,7 @@ void shrink_dcache_sb(struct super_block * sb)
         * Pass two ... free the dentries for this superblock.
         */
 repeat:
-       next = dentry_unused.next;
-       while (next != &dentry_unused) {
-               tmp = next;
-               next = tmp->next;
+       list_for_each_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
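
The dcache hunks above (and the jffs ones below) replace open-coded pointer walks with the list.h iterators; the _safe variants cache the next pointer so the loop body may unlink the current entry. Side by side, as a sketch:

	/* open-coded walk */
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		/* ... */
	}

	/* iterator form: identical traversal, removal-safe */
	list_for_each_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		/* ... may list_del(tmp) here ... */
	}
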
index c6ec66f..49bbc2b 100644 (file)
@@ -1340,8 +1340,7 @@ int journal_stop(handle_t *handle)
        if (handle->h_sync) {
                do {
                        old_handle_count = transaction->t_handle_count;
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                } while (old_handle_count != transaction->t_handle_count);
        }
 
index 456d7e6..27f199e 100644 (file)
@@ -1701,12 +1701,10 @@ jffs_find_file(struct jffs_control *c, __u32 ino)
 {
        struct jffs_file *f;
        int i = ino % c->hash_len;
-       struct list_head *tmp;
 
        D3(printk("jffs_find_file(): ino: %u\n", ino));
 
-       for (tmp = c->hash[i].next; tmp != &c->hash[i]; tmp = tmp->next) {
-               f = list_entry(tmp, struct jffs_file, hash);
+       list_for_each_entry(f, &c->hash[i], hash) {
                if (ino != f->ino)
                        continue;
                D3(printk("jffs_find_file(): Found file with ino "
@@ -2102,13 +2100,12 @@ jffs_foreach_file(struct jffs_control *c, int (*func)(struct jffs_file *))
        int result = 0;
 
        for (pos = 0; pos < c->hash_len; pos++) {
-               struct list_head *p, *next;
-               for (p = c->hash[pos].next; p != &c->hash[pos]; p = next) {
-                       /* We need a reference to the next file in the
-                          list because `func' might remove the current
-                          file `f'.  */
-                       next = p->next;
-                       r = func(list_entry(p, struct jffs_file, hash));
+               struct jffs_file *f, *next;
+
+               /* We must do _safe, because 'func' might remove the
+                  current file 'f' from the list.  */
+               list_for_each_entry_safe(f, next, &c->hash[pos], hash) {
+                       r = func(f);
                        if (r < 0)
                                return r;
                        result += r;
@@ -2613,9 +2610,8 @@ jffs_print_hash_table(struct jffs_control *c)
 
        printk("JFFS: Dumping the file system's hash table...\n");
        for (i = 0; i < c->hash_len; i++) {
-               struct list_head *p;
-               for (p = c->hash[i].next; p != &c->hash[i]; p = p->next) {
-                       struct jffs_file *f=list_entry(p,struct jffs_file,hash);
+               struct jffs_file *f;
+               list_for_each_entry(f, &c->hash[i], hash) {
                        printk("*** c->hash[%u]: \"%s\" "
                               "(ino: %u, pino: %u)\n",
                               i, (f->name ? f->name : ""),
index 14b3ce8..87332f3 100644 (file)
@@ -299,8 +299,7 @@ nlmclnt_alloc_call(void)
                        return call;
                }
                printk("nlmclnt_alloc_call: failed, waiting for memory\n");
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(5*HZ);
+               schedule_timeout_interruptible(5*HZ);
        }
        return NULL;
 }
index 3415626..2fa9fdf 100644 (file)
@@ -537,7 +537,6 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
 static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
 {
        struct vfsmount *res, *p, *q, *r, *s;
-       struct list_head *h;
        struct nameidata nd;
 
        res = q = clone_mnt(mnt, dentry);
@@ -546,8 +545,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
        q->mnt_mountpoint = mnt->mnt_mountpoint;
 
        p = mnt;
-       for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
-               r = list_entry(h, struct vfsmount, mnt_child);
+       list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
                if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
                        continue;
 
index 2681485..edc9551 100644 (file)
@@ -34,8 +34,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
+               schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
        } while (!signalled());
        rpc_clnt_sigunmask(clnt, &oldset);
index 0c5a308..9701ca8 100644 (file)
@@ -2418,14 +2418,11 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                *timeout = NFS4_POLL_RETRY_MAX;
        rpc_clnt_sigmask(clnt, &oldset);
        if (clnt->cl_intr) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(*timeout);
+               schedule_timeout_interruptible(*timeout);
                if (signalled())
                        res = -ERESTARTSYS;
-       } else {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(*timeout);
-       }
+       } else
+               schedule_timeout_uninterruptible(*timeout);
        rpc_clnt_sigunmask(clnt, &oldset);
        *timeout <<= 1;
        return res;
@@ -2578,8 +2575,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(timeout);
+       schedule_timeout_interruptible(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
index 5452364..b6cc8cf 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/bit_spinlock.h>
 
 #include "aops.h"
 #include "attrib.h"
index 2c7a23d..66aa0b9 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -39,7 +39,11 @@ void pipe_wait(struct inode * inode)
 {
        DEFINE_WAIT(wait);
 
-       prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE);
+       /*
+        * Pipes are system-local resources, so sleeping on them
+        * is considered a noninteractive wait:
+        */
+       prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
        up(PIPE_SEM(*inode));
        schedule();
        finish_wait(PIPE_WAIT(*inode), &wait);
index a8e29e9..4b15761 100644 (file)
@@ -2868,8 +2868,7 @@ static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        unsigned long bcount = journal->j_bcount;
        while (1) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
                journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
                while ((atomic_read(&journal->j_wcount) > 0 ||
                        atomic_read(&journal->j_jlock)) &&
index 6951c35..44b02fc 100644 (file)
@@ -1934,8 +1934,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                        if (SB_AP_BITMAP(s))
                                brelse(SB_AP_BITMAP(s)[j].bh);
                }
-               if (SB_AP_BITMAP(s))
-                       vfree(SB_AP_BITMAP(s));
+               vfree(SB_AP_BITMAP(s));
        }
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
index 220babe..38ab558 100644 (file)
@@ -2397,8 +2397,7 @@ smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir,
                if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) {
                        /* a damn Win95 bug - sometimes it clags if you 
                           ask it too fast */
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(HZ/5);
+                       schedule_timeout_interruptible(msecs_to_jiffies(200));
                        continue;
                 }
 
index 6c6fd0f..b0d2873 100644 (file)
@@ -39,8 +39,7 @@ typedef struct timespec timespec_t;
 
 static inline void delay(long ticks)
 {
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(ticks);
+       schedule_timeout_uninterruptible(ticks);
 }
 
 static inline void nanotime(struct timespec *tvp)
index 655bf4a..e82cf72 100644 (file)
@@ -1780,10 +1780,10 @@ xfsbufd(
                        xfsbufd_force_sleep = 0;
                }
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
+               schedule_timeout_interruptible
+                       (xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
-               age = (xfs_buf_age_centisecs * HZ) / 100;
+               age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
                spin_lock(&pbd_delwrite_lock);
                list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
                        PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
index 0da87bf..2302454 100644 (file)
@@ -467,7 +467,7 @@ xfs_flush_inode(
 
        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
-       delay(HZ/2);
+       delay(msecs_to_jiffies(500));
 }
 
 /*
@@ -492,7 +492,7 @@ xfs_flush_device(
 
        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
-       delay(HZ/2);
+       delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
 }
 
@@ -520,10 +520,9 @@ xfssyncd(
        struct vfs_sync_work    *work, *n;
        LIST_HEAD               (tmp);
 
-       timeleft = (xfs_syncd_centisecs * HZ) / 100;
+       timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeleft = schedule_timeout(timeleft);
+               timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop())
@@ -537,7 +536,8 @@ xfssyncd(
                 */
                if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
                        if (!timeleft)
-                               timeleft = (xfs_syncd_centisecs * HZ) / 100;
+                               timeleft = xfs_syncd_centisecs *
+                                                       msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
                        list_add_tail(&vfsp->vfs_sync_work.w_list,
                                        &vfsp->vfs_sync_list);
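
The centisecond conversions in these xfs hunks are arithmetic-equivalent up to rounding: x * msecs_to_jiffies(10) rounds each 10 ms up to a whole tick, while (x * HZ) / 100 truncates, so the two agree exactly only when HZ is a multiple of 100. Worked example:

	/* xfs_syncd_centisecs = 3000 (30 s):
	 *   HZ = 100 : old (3000*100)/100  = 3000    new 3000*1  = 3000
	 *   HZ = 1000: old (3000*1000)/100 = 30000   new 3000*10 = 30000
	 *   HZ = 250 : old (3000*250)/100  = 7500    new 3000*3  = 9000 (rounds up)
	 */
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
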
index 80780db..8197c69 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/kernel.h>
 #include <asm/current.h>
 
-
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  * We make no fairness assumptions. They have a cost.
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       int on_cpu;
-       int line_no;
-       void *previous;
-       struct task_struct * task;
-       const char *base_file;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED     (spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
-#else
-#define SPIN_LOCK_UNLOCKED     (spinlock_t){ 0 }
-#endif
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_unlock(spinlock_t * lock);
-extern void debug_spin_lock(spinlock_t * lock, const char *, int);
-extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define _raw_spin_lock(LOCK) \
-       debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define _raw_spin_trylock(LOCK) \
-       debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
-#else
-static inline void _raw_spin_unlock(spinlock_t * lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t * lock)
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
 {
        long tmp;
 
@@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock)
        : "m"(lock->lock) : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        return !test_and_set_bit(0, &lock->lock);
 }
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 
 /***********************************************************/
 
-typedef struct {
-       volatile unsigned int lock;
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED       (rwlock_t){ 0 }
-
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline int read_can_lock(rwlock_t *lock)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
        return (lock->lock & 1) == 0;
 }
 
-static inline int write_can_lock(rwlock_t *lock)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
        return lock->lock == 0;
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _raw_write_lock(rwlock_t * lock);
-extern void _raw_read_lock(rwlock_t * lock);
-#else
-static inline void _raw_write_lock(rwlock_t * lock)
+static inline void __raw_read_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       bne     %1,6f\n"
-       "       lda     %1,1\n"
+       "       blbs    %1,6f\n"
+       "       subl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       bne     %1,6b\n"
+       "       blbs    %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
 
-static inline void _raw_read_lock(rwlock_t * lock)
+static inline void __raw_write_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       subl    %1,2,%1\n"
+       "       bne     %1,6f\n"
+       "       lda     %1,1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
+       "       bne     %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static inline int _raw_read_trylock(rwlock_t * lock)
+static inline int __raw_read_trylock(raw_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock)
        return success;
 }
 
-static inline int _raw_write_trylock(rwlock_t * lock)
+static inline int __raw_write_trylock(raw_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock)
        return success;
 }
 
-static inline void _raw_write_unlock(rwlock_t * lock)
-{
-       mb();
-       lock->lock = 0;
-}
-
-static inline void _raw_read_unlock(rwlock_t * lock)
+static inline void __raw_read_unlock(raw_rwlock_t * lock)
 {
        long regx;
        __asm__ __volatile__(
@@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock)
        : "m" (*lock) : "memory");
 }
 
+static inline void __raw_write_unlock(raw_rwlock_t * lock)
+{
+       mb();
+       lock->lock = 0;
+}
+
 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h
new file mode 100644 (file)
index 0000000..8141eb5
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ALPHA_SPINLOCK_TYPES_H
+#define _ALPHA_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
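
These new per-arch spinlock_types.h headers are half of the lock-type split: the architecture now defines only the raw lock word, and linux/spinlock_types.h layers the generic spinlock_t on top, so the debug bookkeeping lives in one place. Paraphrasing that generic header from memory, so treat the field details as approximate:

typedef struct {
	raw_spinlock_t raw_lock;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
} spinlock_t;
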
index 1f906d0..cb4906b 100644 (file)
  * Unlocked value: 0
  * Locked value: 1
  */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        smp_mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        }
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        smp_mb();
 
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 
 /*
  * RWLOCKS
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED       (rwlock_t) { 0 }
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x)    (*((volatile unsigned int *)(x)) != 0)
-
-/*
+ *
+ *
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
-static inline void _raw_write_lock(rwlock_t *rw)
+#define rwlock_is_locked(x)    (*((volatile unsigned int *)(x)) != 0)
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
        smp_mb();
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        smp_mb();
 
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
        smp_mb();
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        : "cc");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h
new file mode 100644 (file)
index 0000000..43e83f6
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 278de61..c49df63 100644 (file)
 #define __NR_inotify_init              (__NR_SYSCALL_BASE+316)
 #define __NR_inotify_add_watch         (__NR_SYSCALL_BASE+317)
 #define __NR_inotify_rm_watch          (__NR_SYSCALL_BASE+318)
+#define __NR_mbind                     (__NR_SYSCALL_BASE+319)
+#define __NR_get_mempolicy             (__NR_SYSCALL_BASE+320)
+#define __NR_set_mempolicy             (__NR_SYSCALL_BASE+321)
 
 /*
  * The following SWIs are ARM private.
index 791ee1d..dc28daa 100644 (file)
@@ -22,8 +22,6 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-#define irq_enter()            (preempt_count() += HARDIRQ_OFFSET)
-
 #ifndef CONFIG_SMP
 
 extern asmlinkage void __do_softirq(void);
index 6f857be..a9c5549 100644 (file)
                VMLINUX_SYMBOL(__kprobes_text_start) = .;               \
                *(.kprobes.text)                                        \
                VMLINUX_SYMBOL(__kprobes_text_end) = .;
+
+               /* DWARF debug sections.
+               Symbols in the DWARF debugging sections are relative to
+               the beginning of the section so we begin them at 0.  */
+#define DWARF_DEBUG                                                    \
+               /* DWARF 1 */                                           \
+               .debug          0 : { *(.debug) }                       \
+               .line           0 : { *(.line) }                        \
+               /* GNU DWARF 1 extensions */                            \
+               .debug_srcinfo  0 : { *(.debug_srcinfo) }               \
+               .debug_sfnames  0 : { *(.debug_sfnames) }               \
+               /* DWARF 1.1 and DWARF 2 */                             \
+               .debug_aranges  0 : { *(.debug_aranges) }               \
+               .debug_pubnames 0 : { *(.debug_pubnames) }              \
+               /* DWARF 2 */                                           \
+               .debug_info     0 : { *(.debug_info                     \
+                               .gnu.linkonce.wi.*) }                   \
+               .debug_abbrev   0 : { *(.debug_abbrev) }                \
+               .debug_line     0 : { *(.debug_line) }                  \
+               .debug_frame    0 : { *(.debug_frame) }                 \
+               .debug_str      0 : { *(.debug_str) }                   \
+               .debug_loc      0 : { *(.debug_loc) }                   \
+               .debug_macinfo  0 : { *(.debug_macinfo) }               \
+               /* SGI/MIPS DWARF 2 extensions */                       \
+               .debug_weaknames 0 : { *(.debug_weaknames) }            \
+               .debug_funcnames 0 : { *(.debug_funcnames) }            \
+               .debug_typenames 0 : { *(.debug_typenames) }            \
+               .debug_varnames  0 : { *(.debug_varnames) }             \
+
+               /* Stabs debugging sections.  */
+#define STABS_DEBUG                                                    \
+               .stab 0 : { *(.stab) }                                  \
+               .stabstr 0 : { *(.stabstr) }                            \
+               .stab.excl 0 : { *(.stab.excl) }                        \
+               .stab.exclstr 0 : { *(.stab.exclstr) }                  \
+               .stab.index 0 : { *(.stab.index) }                      \
+               .stab.indexstr 0 : { *(.stab.indexstr) }                \
+               .comment 0 : { *(.comment) }
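
The DWARF_DEBUG and STABS_DEBUG macros exist so each arch linker script can stop carrying its own copy of the debug-section boilerplate; typical use at the tail of an arch vmlinux.lds.S would look like this sketch:

SECTIONS
{
	/* ... text, data, bss ... */

	STABS_DEBUG

	DWARF_DEBUG
}
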
index 28ed8b2..75c67c7 100644 (file)
@@ -35,7 +35,7 @@
  */
 #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
 
-extern inline long
+static inline long
 div_ll_X_l_rem(long long divs, long div, long *rem)
 {
        long dum2;
index 37bef8e..0a4ec76 100644 (file)
@@ -679,7 +679,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
@@ -693,7 +693,7 @@ extern inline void prefetch(const void *x)
 
 /* 3dnow! prefetch to get an exclusive cache line. Useful for 
    spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
index f9ff31f..2360435 100644 (file)
@@ -7,46 +7,21 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 
-asmlinkage int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)      (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
+#define __raw_spin_is_locked(x) \
+               (*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "jns 3f\n" \
@@ -57,7 +32,7 @@ typedef struct {
        "jmp 1b\n" \
        "3:\n\t"
 
-#define spin_lock_string_flags \
+#define __raw_spin_lock_string_flags \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "jns 4f\n\t" \
@@ -73,86 +48,71 @@ typedef struct {
        "jmp 1b\n" \
        "4:\n\t"
 
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               __raw_spin_lock_string
+               :"=m" (lock->slock) : : "memory");
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+       __asm__ __volatile__(
+               __raw_spin_lock_string_flags
+               :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+       char oldval;
+       __asm__ __volatile__(
+               "xchgb %b0,%1"
+               :"=q" (oldval), "=m" (lock->slock)
+               :"0" (0) : "memory");
+       return oldval > 0;
+}
+
 /*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
+ * __raw_spin_unlock based on writing $1 to the low byte.
+ * This method works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
  * (PPro errata 66, 92)
  */
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "movb $1,%0" \
                :"=m" (lock->slock) : : "memory"
 
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
        __asm__ __volatile__(
-               spin_unlock_string
+               __raw_spin_unlock_string
        );
 }
 
 #else
 
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->slock) \
                :"0" (oldval) : "memory"
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
-       __asm__ __volatile__(
-               spin_unlock_string
-       );
-}
 
-#endif
-
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
-       char oldval;
        __asm__ __volatile__(
-               "xchgb %b0,%1"
-               :"=q" (oldval), "=m" (lock->slock)
-               :"0" (0) : "memory");
-       return oldval > 0;
+               __raw_spin_unlock_string
+       );
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
 #endif
-       __asm__ __volatile__(
-               spin_lock_string
-               :"=m" (lock->slock) : : "memory");
-}
 
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
-       __asm__ __volatile__(
-               spin_lock_string_flags
-               :"=m" (lock->slock) : "r" (flags) : "memory");
-}
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x)         ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define __raw_write_can_lock(x)                ((x)->lock == RW_LOCK_BIAS)
 
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
        return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
        return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+                                : "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
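
An aside for readers who do not speak AT&T assembly: the byte-lock protocol that __raw_spin_lock_string and friends implement above can be modelled in portable C11. What follows is a user-space sketch under invented sketch_* names, not the kernel code itself: "lock ; decb" becomes an atomic fetch-and-subtract whose old value says whether the lock was free, the "rep;nop" loop becomes a relaxed polling load, and unlock is the plain store of 1 used on non-PPro parts.

/* Sketch: the i386 byte-lock protocol in portable C11 (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { _Atomic signed char slock; } sketch_spinlock_t;  /* 1 = free */

static void sketch_spin_lock(sketch_spinlock_t *l)
{
	for (;;) {
		/* "lock ; decb %0 / jns 3f": old value > 0 means we got it */
		if (atomic_fetch_sub(&l->slock, 1) > 0)
			return;
		/* "rep;nop / cmpb $0,%0 / jle 2b": poll until it looks free */
		while (atomic_load_explicit(&l->slock, memory_order_relaxed) <= 0)
			;	/* cpu_relax() in the kernel */
	}
}

static bool sketch_spin_trylock(sketch_spinlock_t *l)
{
	/* "xchgb %b0,%1" with 0: success iff the old byte was positive */
	return atomic_exchange(&l->slock, 0) > 0;
}

static void sketch_spin_unlock(sketch_spinlock_t *l)
{
	/* "movb $1,%0"; the PPro-errata variant exchanges instead of storing */
	atomic_store(&l->slock, 1);
}

int main(void)
{
	sketch_spinlock_t l = { 1 };
	sketch_spin_lock(&l);
	printf("trylock while held: %d\n", sketch_spin_trylock(&l));	/* 0 */
	sketch_spin_unlock(&l);
	printf("trylock when free:  %d\n", sketch_spin_trylock(&l));	/* 1 */
	return 0;
}

Note that a contended decb leaves the byte negative and a successful trylock leaves it at 0, so the "<= 0" test in __raw_spin_is_locked() covers both cases; the store of 1 is the only release path.
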
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644 (file)
index 0000000..59efe84
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
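
The { RW_LOCK_BIAS } initializer above is the whole trick behind the i386 (and, further down, m32r) rwlock encoding: the counter holds RW_LOCK_BIAS when the lock is free, each reader costs 1, and a writer subtracts the entire bias so the counter reaches zero only for a sole writer with no readers. Below is a minimal C11 sketch of that accounting under invented names, covering just the trylock paths shown above; the kernel's __read_lock_failed/__write_lock_failed slow paths are replaced by a simple undo-and-fail.

/* Sketch: RW_LOCK_BIAS accounting behind the i386/m32r rwlocks (illustrative). */
#include <stdatomic.h>
#include <stdbool.h>

#define SK_RW_BIAS 0x01000000

typedef struct { _Atomic int count; } sketch_rwlock_t;	/* init to SK_RW_BIAS */

static bool sketch_read_trylock(sketch_rwlock_t *rw)
{
	/* atomic_dec(): a non-negative result means no writer holds the bias */
	if (atomic_fetch_sub(&rw->count, 1) - 1 >= 0)
		return true;
	atomic_fetch_add(&rw->count, 1);	/* undo, as the kernel does */
	return false;
}

static bool sketch_write_trylock(sketch_rwlock_t *rw)
{
	/* atomic_sub_and_test(RW_LOCK_BIAS): hits zero iff we are alone */
	if (atomic_fetch_sub(&rw->count, SK_RW_BIAS) == SK_RW_BIAS)
		return true;
	atomic_fetch_add(&rw->count, SK_RW_BIAS);	/* undo on failure */
	return false;
}

static void sketch_read_unlock(sketch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->count, 1);		/* "lock ; incl" */
}

static void sketch_write_unlock(sketch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->count, SK_RW_BIAS);	/* "lock ; addl $BIAS" */
}

With this encoding the "> 0" test in __raw_read_can_lock() and the "== RW_LOCK_BIAS" test in __raw_write_can_lock() fall out for free.
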
index d2430aa..5b78611 100644 (file)
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
-#define spin_lock_init(x)                      ((x)->lock = 0)
+#define __raw_spin_lock_init(x)                        ((x)->lock = 0)
 
 #ifdef ASM_SUPPORTED
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
  */
 
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
        register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 #endif
 }
 
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 
 /* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
        barrier();
        asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
 }
 
 #else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)                                                             \
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x)                                                            \
 do {                                                                                   \
        __u32 *ia64_spinlock_ptr = (__u32 *) (x);                                       \
        __u64 ia64_spinlock_val;                                                        \
@@ -117,29 +109,20 @@ do {                                                                                      \
                } while (ia64_spinlock_val);                                            \
        }                                                                               \
 } while (0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define __raw_spin_unlock(x)   do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-typedef struct {
-       volatile unsigned int read_counter      : 24;
-       volatile unsigned int write_lock        :  8;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_trylock(x)          (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
+#define __raw_read_can_lock(rw)                (*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw)       (*(volatile int *)(rw) == 0)
 
-#define _raw_read_lock(rw)                                                             \
+#define __raw_read_lock(rw)                                                            \
 do {                                                                                   \
-       rwlock_t *__read_lock_ptr = (rw);                                               \
+       raw_rwlock_t *__read_lock_ptr = (rw);                                           \
                                                                                        \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
@@ -148,14 +131,14 @@ do {                                                                                      \
        }                                                                               \
 } while (0)
 
-#define _raw_read_unlock(rw)                                   \
+#define __raw_read_unlock(rw)                                  \
 do {                                                           \
-       rwlock_t *__read_lock_ptr = (rw);                       \
+       raw_rwlock_t *__read_lock_ptr = (rw);                   \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)                                                    \
+#define __raw_write_lock(rw)                                                   \
 do {                                                                           \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
@@ -170,7 +153,7 @@ do {                                                                                \
                :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
 } while(0)
 
-#define _raw_write_trylock(rw)                                                 \
+#define __raw_write_trylock(rw)                                                        \
 ({                                                                             \
        register long result;                                                   \
                                                                                \
@@ -182,7 +165,7 @@ do {                                                                                \
        (result == 0);                                                          \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        u8 *y = (u8 *)x;
        barrier();
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l)                                                             \
+#define __raw_write_lock(l)                                                            \
 ({                                                                                     \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        } while (ia64_val);                                                             \
 })
 
-#define _raw_write_trylock(rw)                                         \
+#define __raw_write_trylock(rw)                                                \
 ({                                                                     \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        (ia64_val == 0);                                                \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        barrier();
        x->write_lock = 0;
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /*  _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h
new file mode 100644 (file)
index 0000000..474e46f
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int read_counter      : 31;
+       volatile unsigned int write_lock        :  1;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0, 0 }
+
+#endif
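
The ia64 read path above leans on fetchadd with acquire semantics: a reader optimistically bumps the counter and, if the returned old value was negative (the writer's sign bit was already set), backs the increment out and waits for the bit to clear. Here is a hedged C11 rendering of that shape, user-space only, with invented names and a simplified write side standing in for the cmpxchg/st4.rel assembly.

/* Sketch: fetchadd-style read lock, sign bit owned by the writer (illustrative). */
#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic int32_t word; } sketch_ia64_rwlock_t;	/* 0 = free */

static void sketch_read_lock(sketch_ia64_rwlock_t *rw)
{
	for (;;) {
		/* ia64_fetchadd(1, ..., acq) returns the old value; negative
		 * means the write bit (bit 31) was set */
		if (atomic_fetch_add_explicit(&rw->word, 1,
					      memory_order_acquire) >= 0)
			return;
		/* ia64_fetchadd(-1, ..., rel): back out, then wait */
		atomic_fetch_sub_explicit(&rw->word, 1, memory_order_release);
		while (atomic_load_explicit(&rw->word, memory_order_relaxed) < 0)
			;	/* cpu_relax() */
	}
}

static void sketch_read_unlock(sketch_ia64_rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->word, 1, memory_order_release);
}

static int sketch_write_trylock(sketch_ia64_rwlock_t *rw)
{
	/* the asm compares against ar.ccv = 0: the word must be wholly free */
	int32_t expected = 0;
	return atomic_compare_exchange_strong(&rw->word, &expected, INT32_MIN);
}

static void sketch_write_unlock(sketch_ia64_rwlock_t *rw)
{
	/* stands in for clearing the write_lock bitfield with a release store */
	atomic_fetch_and_explicit(&rw->word, INT32_MAX, memory_order_release);
}
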
index 6608d83..7de7def 100644 (file)
 #include <asm/atomic.h>
 #include <asm/page.h>
 
-extern int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
-#define RW_LOCK_BIAS            0x01000000
-#define RW_LOCK_BIAS_STR       "0x01000000"
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)      (*(volatile int *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)                (*(volatile int *)(&(x)->slock) <= 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /**
- * _raw_spin_trylock - Try spin lock and return a result
+ * __raw_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * _raw_spin_trylock() tries to get the lock and returns a result.
+ * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        int oldval;
        unsigned long tmp1, tmp2;
@@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
         * }
         */
        __asm__ __volatile__ (
-               "# spin_trylock                 \n\t"
+               "# __raw_spin_trylock           \n\t"
                "ldi    %1, #0;                 \n\t"
                "mvfc   %2, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
@@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        return (oldval > 0);
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp0, tmp1;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("pc: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
@@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
         * }
         */
        __asm__ __volatile__ (
-               "# spin_lock                    \n\t"
+               "# __raw_spin_lock              \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc   %1, psw;                \n\t"
@@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
        mb();
        lock->slock = 1;
 }
@@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
+ * As on x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
  */
-typedef struct {
-       volatile int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        /*
         * rw->lock :  >0 : unlock
         *          : <=0 : lock
@@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        /*
         * rw->lock :  =RW_LOCK_BIAS_STR : unlock
         *          : !=RW_LOCK_BIAS_STR : lock
@@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
@@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
@@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw)
        );
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h
new file mode 100644 (file)
index 0000000..7e9941c
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_M32R_SPINLOCK_TYPES_H
+#define _ASM_M32R_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile int lock;
+} raw_rwlock_t;
+
+#define RW_LOCK_BIAS                   0x01000000
+#define RW_LOCK_BIAS_STR               "0x01000000"
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
index 114d3eb..4d0135b 100644 (file)
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x)      do { (x)->lock = 0; } while(0)
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while ((x)->lock)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -38,13 +28,13 @@ typedef struct {
  * We make no fairness assumptions.  They have a cost.
  */
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_lock        \n"
+               "       .set    noreorder       # __raw_spin_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        li     %1, 1                                   \n"
@@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_lock        \n"
+               "       .set    noreorder       # __raw_spin_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        li     %1, 1                                   \n"
@@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        }
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
-       "       .set    noreorder       # _raw_spin_unlock      \n"
+       "       .set    noreorder       # __raw_spin_unlock     \n"
        "       sync                                            \n"
        "       sw      $0, %0                                  \n"
        "       .set\treorder                                   \n"
@@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
        : "memory");
 }
 
-static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned int temp, res;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_trylock     \n"
+               "       .set    noreorder       # __raw_spin_trylock    \n"
                "1:     ll      %0, %3                                  \n"
                "       ori     %2, %0, 1                               \n"
                "       sc      %2, %1                                  \n"
@@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_trylock     \n"
+               "       .set    noreorder       # __raw_spin_trylock    \n"
                "1:     ll      %0, %3                                  \n"
                "       ori     %2, %0, 1                               \n"
                "       sc      %2, %1                                  \n"
@@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
  * read-locks.
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_lock        \n"
+               "       .set    noreorder       # __raw_read_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
@@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_lock        \n"
+               "       .set    noreorder       # __raw_read_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
@@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer.  */
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "1:     ll      %1, %2          # _raw_read_unlock      \n"
+               "1:     ll      %1, %2          # __raw_read_unlock     \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
@@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_unlock      \n"
+               "       .set    noreorder       # __raw_read_unlock     \n"
                "1:     ll      %1, %2                                  \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
@@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_lock       \n"
+               "       .set    noreorder       # __raw_write_lock      \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_lock       \n"
+               "       .set    noreorder       # __raw_write_lock      \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        __asm__ __volatile__(
-       "       sync                    # _raw_write_unlock     \n"
+       "       sync                    # __raw_write_unlock    \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
        : "memory");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
        int ret;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_trylock    \n"
+               "       .set    noreorder       # __raw_write_trylock   \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
@@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_trylock    \n"
+               "       .set    noreorder       # __raw_write_trylock   \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h
new file mode 100644 (file)
index 0000000..ce26c50
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
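
Both MIPS variants above are classic ll/sc retry loops: load-linked the word, compute the new value, store-conditional it back, and branch to the "1:" label if the reservation was lost. C11's weak compare-exchange, which is allowed to fail spuriously just as sc is, is the closest portable analogue. A sketch of the trylock under invented names follows; note the asm stores old|1 unconditionally and tests the old bit afterwards, while this version simply bails out early when the lock is already held.

/* Sketch: an ll/sc-style trylock as a weak compare-exchange loop (illustrative). */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { _Atomic unsigned int lock; } sketch_mips_spinlock_t;	/* 0 = free */

static bool sketch_spin_trylock(sketch_mips_spinlock_t *l)
{
	unsigned int old = atomic_load_explicit(&l->lock, memory_order_relaxed);

	do {
		if (old & 1)	/* "andi %0, %0, 1": it was already held */
			return false;
		/* the weak CAS may fail spuriously, exactly as "sc" may, in
		 * which case we loop back just like the branch to "1:" */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &old, old | 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static void sketch_spin_unlock(sketch_mips_spinlock_t *l)
{
	/* "sync ; sw $0, %0": a release store of zero */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}
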
index e24f757..048a2c7 100644 (file)
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
-/* Can't use _raw_spin_lock_irq because of #include problems, so
+/* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {    \
-       spinlock_t *s = ATOMIC_HASH(l);         \
+       raw_spinlock_t *s = ATOMIC_HASH(l);             \
        local_irq_save(f);                      \
-       _raw_spin_lock(s);                      \
+       __raw_spin_lock(s);                     \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       spinlock_t *s = ATOMIC_HASH(l);                 \
-       _raw_spin_unlock(s);                            \
+       raw_spinlock_t *s = ATOMIC_HASH(l);                     \
+       __raw_spin_unlock(s);                           \
        local_irq_restore(f);                           \
 } while(0)
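
The _atomic_spin_lock_irqsave() machinery above exists because PA-RISC has no compare-and-swap: every atomic_t is protected by one of ATOMIC_HASH_SIZE spinlocks, selected by hashing the variable's cache-line address. The following user-space illustration of the hashing idea uses atomic_flag in place of the raw spinlock and necessarily omits the kernel's local_irq_save(); all names are invented.

/* Sketch: hashed-lock emulation of atomic ops, parisc style (user space). */
#include <stdatomic.h>
#include <stdint.h>

#define SK_HASH_SIZE	4
#define SK_LINE_BYTES	64	/* stands in for L1_CACHE_BYTES */

static atomic_flag sk_hash[SK_HASH_SIZE] = {
	ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT
};

/* ATOMIC_HASH(): pick a lock by the variable's cache line, mod table size */
static atomic_flag *sk_lock_for(const void *addr)
{
	return &sk_hash[((uintptr_t)addr / SK_LINE_BYTES) & (SK_HASH_SIZE - 1)];
}

/* an "atomic" add implemented as a plain update under the hashed lock */
static int sk_atomic_add_return(int i, int *v)
{
	atomic_flag *l = sk_lock_for(v);
	int ret;

	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		;			/* __raw_spin_lock(s) */
	ret = (*v += i);		/* the protected, non-atomic update */
	atomic_flag_clear_explicit(l, memory_order_release);
	return ret;
}
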
 
index 928e5ef..af7db69 100644 (file)
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <asm/spinlock.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
index 0673271..aa592d8 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
index 0b61f51..a9dfadd 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 
 #include <asm/hardware.h>
 #include <asm/page.h>
index 679ea1c..43eaa6e 100644 (file)
@@ -2,30 +2,25 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,
  * the semaphore address has to be 16-byte aligned.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
 }
 
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
 
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x)
        mb();
 }
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        mb();
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
        mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        int ret;
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
        return ret;
 }
-       
-#define spin_lock_own(LOCK, LOCATION)  ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC 0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)                                                  \
-       do {                                                            \
-               if (unlikely((x)->magic != SPINLOCK_MAGIC)) {                   \
-                       printk(KERN_ERR "%s:%d: spin_is_locked"         \
-                       " on uninitialized spinlock %p.\n",             \
-                               __FILE__, __LINE__, (x));               \
-               }                                                       \
-       } while(0)
-
-#define spin_is_locked(x)                                              \
-       ({                                                              \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_is_locked(%s/%p) already"   \
-                               " locked by %s:%d in %s at %p(%d)\n",   \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               *a == 0;                                                \
-       })
-
-#define spin_unlock_wait(x)                                            \
-       do {                                                            \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_unlock_wait(%s/%p)"         \
-                               " owned by %s:%d in %s at %p(%d)\n",    \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               barrier();                                              \
-       } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)                                  \
-do {                                                                   \
-       volatile unsigned int *a = __ldcw_align(LOCK);                  \
-       if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
-               printk("KERN_WARNING                                    \
-                       %s: called on %d from %p but lock %s on %d\n",  \
-                       LOCATION, smp_processor_id(),                   \
-                       __builtin_return_address(0),                    \
-                       (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
 
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  */
-typedef struct {
-       spinlock_t lock;
-       volatile int counter;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp)        do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 /* read_lock, read_unlock are pretty straightforward.  Of course it somehow
  * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_read_lock(rwlock_t *rw)
+static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
+       __raw_spin_lock(&rw->lock);
 
        rw->counter++;
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static  __inline__ void _raw_read_unlock(rwlock_t *rw)
+static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
+       __raw_spin_lock(&rw->lock);
 
        rw->counter--;
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
 
@@ -194,20 +96,17 @@ static  __inline__ void _raw_read_unlock(rwlock_t *rw)
  * writers) in interrupt handlers someone fucked up and we'd dead-lock
  * sooner or later anyway.   prumpf */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_write_lock(rwlock_t *rw)
+static  __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 retry:
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        if(rw->counter != 0) {
                /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+               __raw_spin_unlock(&rw->lock);
 
-               while(rw->counter != 0);
+               while (rw->counter != 0)
+                       cpu_relax();
 
                goto retry;
        }
@@ -215,26 +114,21 @@ retry:
        /* got it.  now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
 
-static  __inline__ void _raw_write_unlock(rwlock_t *rw)
+static  __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
        rw->counter = 0;
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ int _raw_write_trylock(rwlock_t *rw)
+static  __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
        if (rw->counter != 0) {
                /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+               __raw_spin_unlock(&rw->lock);
 
                return 0;
        }
@@ -243,14 +137,13 @@ static  __inline__ int _raw_write_trylock(rwlock_t *rw)
        rw->counter = -1; /* remember we are locked */
        return 1;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
 {
        return rw->counter > 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
 {
        return rw->counter < 0;
 }
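
One parisc detail worth spelling out from the hunk above is __ldcw_align(). On at least some PA processors ldcw (load-and-clear-word) requires a 16-byte-aligned operand, which is why raw_spinlock_t carries four words: wherever the struct lands, rounding its address up to the next 16-byte boundary yields a usable word inside the array. A small sketch of that arithmetic, under an invented name and assuming the kernel macro's round-up shape:

/* Sketch: what __ldcw_align() computes, the 16-byte-aligned word in lock[4]. */
#include <stdint.h>

typedef struct { volatile unsigned int lock[4]; } sketch_pa_spinlock_t;

/* the struct is 4-byte aligned and spans 16 bytes, so the next 16-byte
 * boundary is at most 12 bytes in: a full word always fits after it */
static volatile unsigned int *sketch_ldcw_align(sketch_pa_spinlock_t *x)
{
	uintptr_t a = ((uintptr_t)x + 15) & ~(uintptr_t)15;
	return (volatile unsigned int *)a;
}
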
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
new file mode 100644 (file)
index 0000000..785bba8
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock[4];
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { { 1, 1, 1, 1 } }
+
+typedef struct {
+       raw_spinlock_t lock;
+       volatile int counter;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
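
The raw_rwlock_t just defined, a spinlock plus a plain counter, is all the rwlock parisc can build without fetch-and-op instructions. Readers bump the counter inside a short critical section; a writer takes the inner spinlock, checks that the counter is zero, and then deliberately keeps the spinlock held until write-unlock, which is what holds off new readers. A compact C11 sketch of that structure, with irq handling omitted and invented names:

/* Sketch: rwlock as one spinlock plus a counter, parisc style (illustrative). */
#include <stdatomic.h>

typedef struct {
	atomic_flag lock;	/* stands in for the ldcw-based raw spinlock */
	_Atomic int counter;	/* >0: readers, -1: writer, 0: free */
} sketch_pa_rwlock_t;		/* init: { ATOMIC_FLAG_INIT, 0 } */

static void sk_spin_lock(atomic_flag *l)
{
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		;
}

static void sk_spin_unlock(atomic_flag *l)
{
	atomic_flag_clear_explicit(l, memory_order_release);
}

static void sk_read_lock(sketch_pa_rwlock_t *rw)
{
	sk_spin_lock(&rw->lock);	/* blocks while a writer holds it */
	rw->counter++;
	sk_spin_unlock(&rw->lock);
}

static void sk_read_unlock(sketch_pa_rwlock_t *rw)
{
	sk_spin_lock(&rw->lock);
	rw->counter--;
	sk_spin_unlock(&rw->lock);
}

static void sk_write_lock(sketch_pa_rwlock_t *rw)
{
	for (;;) {
		sk_spin_lock(&rw->lock);
		if (rw->counter == 0)
			break;		/* "leave without unlocking" */
		sk_spin_unlock(&rw->lock);	/* this basically never happens */
		while (rw->counter != 0)
			;		/* cpu_relax(), per the fix above */
	}
	rw->counter = -1;		/* remember we are locked */
}

static void sk_write_unlock(sketch_pa_rwlock_t *rw)
{
	rw->counter = 0;
	sk_spin_unlock(&rw->lock);
}
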
index 81c5433..26ff844 100644 (file)
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val)
 })
 
 #ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned long magic;
-       volatile unsigned int babble;
-       const char *module;
-       char *bfile;
-       int bline;
-       int oncpu;
-       void *previous;
-       struct task_struct * task;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
 #define KERNEL_START (0x10100000 - 0x1000)
index 17530c2..829481c 100644 (file)
@@ -41,6 +41,10 @@ extern void smp_send_xmon_break(int cpu);
 struct pt_regs;
 extern void smp_message_recv(int, struct pt_regs *);
 
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void) __attribute__((noreturn));
+
 #define NO_PROC_ID             0xFF            /* No processor magic marker */
 #define PROC_CHANGE_PENALTY    20
 
@@ -64,6 +68,8 @@ extern struct klock_info_struct klock_info;
 
 #else /* !(CONFIG_SMP) */
 
+static inline void cpu_die(void) { }
+
 #endif /* !(CONFIG_SMP) */
 
 #endif /* !(_PPC_SMP_H) */
index 909199a..20edcf2 100644 (file)
@@ -5,41 +5,21 @@
 
 /*
  * Simple spin lock operations.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-typedef struct {
-       volatile unsigned long lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       volatile unsigned long owner_pc;
-       volatile unsigned long owner_cpu;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_DEBUG_INIT     , 0, 0
-#else
-#define SPINLOCK_DEBUG_INIT     /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 SPINLOCK_DEBUG_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
        __asm__ __volatile__(
-       "b      1f              # spin_lock\n\
+       "b      1f              # __raw_spin_lock\n\
 2:     lwzx    %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne+    2b\n\
@@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        : "cr0", "memory");
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__("eieio             # spin_unlock": : :"memory");
+       __asm__ __volatile__("eieio     # __raw_spin_unlock": : :"memory");
        lock->lock = 0;
 }
 
-#define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
-
-#else
-
-extern void _raw_spin_lock(spinlock_t *lock);
-extern void _raw_spin_unlock(spinlock_t *lock);
-extern int _raw_spin_trylock(spinlock_t *lock);
-
-#endif
+#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock);
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
 
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
+#define __raw_read_can_lock(rw)        ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
-#define read_can_lock(rw)      ((rw)->lock >= 0)
-#define write_can_lock(rw)     (!(rw)->lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static __inline__ int _raw_read_trylock(rwlock_t *rw)
+static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw)
        return tmp > 0;
 }
 
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
        return tmp == 0;
 }
 
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
        __asm__ __volatile__("eieio             # write_unlock": : :"memory");
        rw->lock = 0;
 }
 
-#else
-
-extern void _raw_read_lock(rwlock_t *rw);
-extern void _raw_read_unlock(rwlock_t *rw);
-extern void _raw_write_lock(rwlock_t *rw);
-extern void _raw_write_unlock(rwlock_t *rw);
-extern int _raw_read_trylock(rwlock_t *rw);
-extern int _raw_write_trylock(rwlock_t *rw);
-
-#endif
-
 #endif /* __ASM_SPINLOCK_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
new file mode 100644 (file)
index 0000000..7919ccc
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned long lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
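
The ppc64 hunks that follow show a two-phase lock: a single atomic lwarx/stwcx. attempt, then a passive wait on plain loads, lowering thread priority with HMT_low() and, on a shared-processor LPAR, donating the time slice to the hypervisor via __spin_yield(), before retrying. Below is a user-space C11 sketch of the loop's shape only: sched_yield() stands in for those yield hints, a CAS of 1 stands in for storing the holder's paca lock_token, and the sk_* names are invented.

/* Sketch: the ppc64 two-phase lock loop (user-space shape, illustrative). */
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>	/* sched_yield() stands in for __spin_yield()/HMT_low() */

typedef struct { _Atomic unsigned int slock; } sketch_ppc_spinlock_t;	/* 0 = free */

static bool sk_trylock(sketch_ppc_spinlock_t *l)
{
	unsigned int expected = 0;

	/* __spin_trylock really stores the holder's paca lock_token; a plain
	 * 1 keeps the sketch simple */
	return atomic_compare_exchange_strong_explicit(&l->slock, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void sk_lock(sketch_ppc_spinlock_t *l)
{
	while (1) {
		if (sk_trylock(l))	/* phase 1: one atomic attempt */
			break;
		do {			/* phase 2: passive wait, no stores */
			sched_yield();	/* HMT_low(); __spin_yield(lock); */
		} while (atomic_load_explicit(&l->slock,
					      memory_order_relaxed) != 0);
		/* HMT_medium(); then retry the atomic attempt */
	}
}

static void sk_unlock(sketch_ppc_spinlock_t *l)
{
	/* "lwsync" then a plain store of zero in the kernel version */
	atomic_store_explicit(&l->slock, 0, memory_order_release);
}
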
index 513a334..d754ab5 100644 (file)
@@ -88,6 +88,7 @@ extern void *cacheable_memcpy(void *, const void *, unsigned int);
 extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
 extern void bad_page_fault(struct pt_regs *, unsigned long, int);
 extern void die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
 #ifdef CONFIG_BOOKE_WDT
 extern u32 booke_wdt_enabled;
 extern u32 booke_wdt_period;
index acd1156..14cb895 100644 (file)
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 #include <linux/config.h>
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/HvCall.h>
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
+#define __raw_spin_is_locked(x)                ((x)->slock != 0)
 
-typedef struct {
-       volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+       unsigned long tmp, tmp2;
 
-#ifdef __KERNEL__
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
+       __asm__ __volatile__(
+"      lwz             %1,%3(13)               # __spin_trylock\n\
+1:     lwarx           %0,0,%2\n\
+       cmpwi           0,%0,0\n\
+       bne-            2f\n\
+       stwcx.          %1,0,%2\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp), "=&r" (tmp2)
+       : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+       : "cr0", "memory");
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+       return tmp;
+}
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
-       lock->lock = 0;
+       return __spin_trylock(lock) == 0;
 }
 
 /*
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)        barrier()
 #define __rw_yield(x)  barrier()
 #define SHARED_PROCESSOR       0
 #endif
-extern void spin_unlock_wait(spinlock_t *lock);
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
-{
-       unsigned long tmp, tmp2;
-
-       __asm__ __volatile__(
-"      lwz             %1,%3(13)               # __spin_trylock\n\
-1:     lwarx           %0,0,%2\n\
-       cmpwi           0,%0,0\n\
-       bne-            2f\n\
-       stwcx.          %1,0,%2\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp), "=&r" (tmp2)
-       : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
-       : "cr0", "memory");
-
-       return tmp;
-}
-
-static int __inline__ _raw_spin_trylock(spinlock_t *lock)
-{
-       return __spin_trylock(lock) == 0;
-}
 
-static void __inline__ _raw_spin_lock(spinlock_t *lock)
+static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
 }
 
-static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
        unsigned long flags_dis;
 
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
 }
 
+static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+       __asm__ __volatile__("lwsync    # __raw_spin_unlock": : :"memory");
+       lock->slock = 0;
+}
+
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(rw)      ((rw)->lock >= 0)
-#define write_can_lock(rw)     (!(rw)->lock)
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-       __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
-       rw->lock = 0;
-}
+#define __raw_read_can_lock(rw)                ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static long __inline__ __read_trylock(rwlock_t *rw)
+static long __inline__ __read_trylock(raw_rwlock_t *rw)
 {
        long tmp;
 
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_read_trylock(rwlock_t *rw)
-{
-       return __read_trylock(rw) > 0;
-}
-
-static void __inline__ _raw_read_lock(rwlock_t *rw)
-{
-       while (1) {
-               if (likely(__read_trylock(rw) > 0))
-                       break;
-               do {
-                       HMT_low();
-                       if (SHARED_PROCESSOR)
-                               __rw_yield(rw);
-               } while (unlikely(rw->lock < 0));
-               HMT_medium();
-       }
-}
-
-static void __inline__ _raw_read_unlock(rwlock_t *rw)
-{
-       long tmp;
-
-       __asm__ __volatile__(
-       "eieio                          # read_unlock\n\
-1:     lwarx           %0,0,%1\n\
-       addic           %0,%0,-1\n\
-       stwcx.          %0,0,%1\n\
-       bne-            1b"
-       : "=&r"(tmp)
-       : "r"(&rw->lock)
-       : "cr0", "memory");
-}
-
 /*
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static __inline__ long __write_trylock(rwlock_t *rw)
+static __inline__ long __write_trylock(raw_rwlock_t *rw)
 {
        long tmp, tmp2;
 
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_write_trylock(rwlock_t *rw)
+static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
 {
-       return __write_trylock(rw) == 0;
+       while (1) {
+               if (likely(__read_trylock(rw) > 0))
+                       break;
+               do {
+                       HMT_low();
+                       if (SHARED_PROCESSOR)
+                               __rw_yield(rw);
+               } while (unlikely(rw->lock < 0));
+               HMT_medium();
+       }
 }
 
-static void __inline__ _raw_write_lock(rwlock_t *rw)
+static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
 {
        while (1) {
                if (likely(__write_trylock(rw) == 0))
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
        }
 }
 
-#endif /* __KERNEL__ */
+static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
+{
+       return __read_trylock(rw) > 0;
+}
+
+static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
+{
+       return __write_trylock(rw) == 0;
+}
+
+static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
+{
+       long tmp;
+
+       __asm__ __volatile__(
+       "eieio                          # read_unlock\n\
+1:     lwarx           %0,0,%1\n\
+       addic           %0,%0,-1\n\
+       stwcx.          %0,0,%1\n\
+       bne-            1b"
+       : "=&r"(tmp)
+       : "r"(&rw->lock)
+       : "cr0", "memory");
+}
+
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       __asm__ __volatile__("lwsync    # write_unlock": : :"memory");
+       rw->lock = 0;
+}
+
 #endif /* __ASM_SPINLOCK_H */
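
The ppc64 trylock above returns the *old* lock word, so zero means the lock
was taken. A minimal user-space sketch of the same convention, using a GCC
__sync builtin in place of the lwarx/stwcx. sequence (the real code stores a
per-CPU paca token rather than 1; all names here are illustrative, not
kernel API):

        typedef struct { volatile unsigned int slock; } demo_spinlock_t;

        /* atomically set slock to 1 iff it was 0; return the old value */
        static unsigned long demo_spin_trylock(demo_spinlock_t *lock)
        {
                return __sync_val_compare_and_swap(&lock->slock, 0, 1);
        }

        /* old value 0 == the lock was free, i.e. we now own it */
        static int demo_raw_spin_trylock(demo_spinlock_t *lock)
        {
                return demo_spin_trylock(lock) == 0;
        }
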
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h
new file mode 100644 (file)
index 0000000..a37c8ea
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 321b23b..273dbec 100644 (file)
@@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} __attribute__ ((aligned (4))) spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-#define spin_lock_init(lp)     do { (lp)->lock = 0; } while(0)
-#define spin_unlock_wait(lp)   do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
 
-static inline void _raw_spin_lock(spinlock_t *lp)
+static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
@@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp)
                _raw_spin_lock_wait(lp, pc);
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lp)
+static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
@@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp)
        return _raw_spin_trylock_retry(lp, pc);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lp)
+static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
        _raw_compare_and_swap(&lp->lock, lp->lock, 0);
 }
@@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp)
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       volatile unsigned int lock;
-       volatile unsigned long owner_pc;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock >= 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == 0)
+#define __raw_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(rwlock_t *lp);
-extern int _raw_read_trylock_retry(rwlock_t *lp);
-extern void _raw_write_lock_wait(rwlock_t *lp);
-extern int _raw_write_trylock_retry(rwlock_t *lp);
+extern void _raw_read_lock_wait(raw_rwlock_t *lp);
+extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_write_lock_wait(raw_rwlock_t *lp);
+extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
 
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
                _raw_read_lock_wait(rw);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int old, cmp;
 
@@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        } while (cmp != old);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int _raw_read_trylock(rwlock_t *rw)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw)
        return _raw_read_trylock_retry(rw);
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
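
The s390 variant drives everything through _raw_compare_and_swap(), with
0x80000000 as the writer bit and the low 31 bits counting readers. A hedged
user-space sketch of the two fast paths, using GCC builtins to stand in for
the cs instruction (demo names, not kernel API; the kernel falls back to a
*_retry slow path when the swap fails):

        /* write: succeed only if there is no reader and no writer at all */
        static int demo_write_trylock(volatile unsigned int *lock)
        {
                return __sync_bool_compare_and_swap(lock, 0, 0x80000000);
        }

        /* read: bump the reader count, but only if the writer bit is clear */
        static int demo_read_trylock(volatile unsigned int *lock)
        {
                unsigned int old = *lock & 0x7fffffffU;

                return __sync_bool_compare_and_swap(lock, old, old + 1);
        }
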
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
new file mode 100644 (file)
index 0000000..f79a221
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} __attribute__ ((aligned (4))) raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+       volatile unsigned int owner_pc;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0, 0 }
+
+#endif
index e770b55..846322d 100644 (file)
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
-typedef struct {
-       volatile unsigned long lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+       do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -36,7 +27,7 @@ typedef struct {
  *
  * We make no fairness assumptions.  They have a cost.
  */
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__ (
                "1:\n\t"
@@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        assert_spin_locked(lock);
 
        lock->lock = 0;
 }
 
-#define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
+#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
@@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * needs to get a irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       spinlock_t lock;
-       atomic_t counter;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_BIAS           0x01000000
-#define RW_LOCK_UNLOCKED       (rwlock_t) { { 0 }, { RW_LOCK_BIAS } }
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        atomic_inc(&rw->counter);
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        atomic_dec(&rw->counter);
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
        atomic_set(&rw->counter, -1);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        atomic_set(&rw->counter, 0);
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
                return 1;
@@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 }
 
 #endif /* __ASM_SH_SPINLOCK_H */
-
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h
new file mode 100644 (file)
index 0000000..8c41b6c
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_SPINLOCK_TYPES_H
+#define __ASM_SH_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned long lock;
+} raw_spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED           { 0 }
+
+typedef struct {
+       raw_spinlock_t lock;
+       atomic_t counter;
+} raw_rwlock_t;
+
+#define RW_LOCK_BIAS                   0x01000000
+#define __RAW_RW_LOCK_UNLOCKED         { { 0 }, { RW_LOCK_BIAS } }
+
+#endif
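
Unlike the compare-and-swap ports, sh composes its rwlock from a spinlock
plus a counter: readers take the inner spinlock just long enough to adjust
the count. A rough user-space analogue of that composition, with a pthread
spinlock standing in for raw_spinlock_t (sketch only; the lock must first be
set up with pthread_spin_init()):

        #include <pthread.h>

        struct demo_rwlock {
                pthread_spinlock_t lock;        /* plays the role of rw->lock    */
                int counter;                    /* plays the role of rw->counter */
        };

        static void demo_read_lock(struct demo_rwlock *rw)
        {
                /* the inner spinlock only guards the counter update */
                pthread_spin_lock(&rw->lock);
                rw->counter++;
                pthread_spin_unlock(&rw->lock);
        }

        static void demo_read_unlock(struct demo_rwlock *rw)
        {
                pthread_spin_lock(&rw->lock);
                rw->counter--;
                pthread_spin_unlock(&rw->lock);
        }
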
index 0cbd87a..111727a 100644 (file)
 
 #include <asm/psr.h>
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-struct _spinlock_debug {
-       unsigned char lock;
-       unsigned long owner_pc;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-};
-typedef struct _spinlock_debug spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0, 0 }
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)  (*((volatile unsigned char *)(&((lp)->lock))) != 0)
-#define spin_unlock_wait(lp)   do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))
-
-extern void _do_spin_lock(spinlock_t *lock, char *str);
-extern int _spin_trylock(spinlock_t *lock);
-extern void _do_spin_unlock(spinlock_t *lock);
-
-#define _raw_spin_trylock(lp)  _spin_trylock(lp)
-#define _raw_spin_lock(lock)   _do_spin_lock(lock, "spin_lock")
-#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
-
-struct _rwlock_debug {
-       volatile unsigned int lock;
-       unsigned long owner_pc;
-       unsigned long reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-};
-typedef struct _rwlock_debug rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }
-
-#define rwlock_init(lp)        do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-
-#define _raw_read_lock(lock)   \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_lock(lock, "read_lock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_unlock(lock, "read_unlock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_lock(lock, "write_lock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_unlock(lock); \
-       local_irq_restore(flags); \
-} while(0)
-
-#else /* !CONFIG_DEBUG_SPINLOCK */
-
-typedef struct {
-       unsigned char lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-
-#define spin_lock_init(lock)   (*((unsigned char *)(lock)) = 0)
-#define spin_is_locked(lock)    (*((volatile unsigned char *)(lock)) != 0)
+#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define spin_unlock_wait(lock) \
-do { \
-       barrier(); \
-} while(*((volatile unsigned char *)lock))
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-extern __inline__ void _raw_spin_lock(spinlock_t *lock)
+extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
        "\n1:\n\t"
@@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock)
        : "g2", "memory", "cc");
 }
 
-extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
+extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned int result;
        __asm__ __volatile__("ldstub [%1], %0"
@@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
        return (result == 0);
 }
 
-extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
+extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
  *
  * XXX This might create some problems with my dual spinlock
  * XXX scheme, deadlocks etc. -DaveM
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define rwlock_init(lp)        do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
-
-
-/* Sort of like atomic_t's on Sparc, but even more clever.
+ *
+ * Sort of like atomic_t's on Sparc, but even more clever.
  *
  *     ------------------------------------
- *     | 24-bit counter           | wlock |  rwlock_t
+ *     | 24-bit counter           | wlock |  raw_rwlock_t
  *     ------------------------------------
  *      31                       8 7     0
  *
@@ -174,9 +78,9 @@ typedef struct {
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-extern __inline__ void _read_lock(rwlock_t *rw)
+extern __inline__ void __read_lock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_read_lock(lock) \
+#define __raw_read_lock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       _read_lock(lock); \
+       __raw_read_lock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void _read_unlock(rwlock_t *rw)
+extern __inline__ void __read_unlock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_read_unlock(lock) \
+#define __raw_read_unlock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       _read_unlock(lock); \
+       __raw_read_unlock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void _raw_write_lock(rwlock_t *rw)
+extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_write_unlock(rw)  do { (rw)->lock = 0; } while(0)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
+#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
 #endif /* !(__ASSEMBLY__) */
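
The sparc layout comment above packs a 24-bit reader count and an 8-bit
wlock byte into one word; two one-line helpers make the encoding concrete
(sketch only):

        /* decode the "| 24-bit counter | wlock |" word shown above */
        static unsigned int demo_readers(unsigned int lock) { return lock >> 8; }
        static unsigned int demo_wlock(unsigned int lock)   { return lock & 0xffU; }
        /* e.g. three readers, no writer: lock == 0x300 -> readers 3, wlock 0 */
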
 
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h
new file mode 100644 (file)
index 0000000..0a0fb11
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __SPARC_SPINLOCK_TYPES_H
+#define __SPARC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index a02c437..ec85d12 100644 (file)
  * must be pre-V9 branches.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
+#define __raw_spin_is_locked(lp)       ((lp)->lock != 0)
 
-typedef struct {
-       volatile unsigned char lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) {0,}
+#define __raw_spin_unlock_wait(lp)     \
+       do {    rmb();                  \
+       } while((lp)->lock)
 
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)  ((lp)->lock != 0)
-
-#define spin_unlock_wait(lp)   \
-do {   rmb();                  \
-} while((lp)->lock)
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned long result;
 
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        return (result == 0UL);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
 "      membar          #StoreStore | #LoadStore\n"
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
        : "memory");
 }
 
-static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
        unsigned long tmp1, tmp2;
 
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
        : "memory");
 }
 
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-       volatile unsigned char lock;
-       unsigned int owner_pc, owner_cpu;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(__lock) ((__lock)->lock != 0)
-#define spin_unlock_wait(__lock)       \
-do { \
-       rmb(); \
-} while((__lock)->lock)
-
-extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
-extern void _do_spin_unlock(spinlock_t *lock);
-extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
-
-#define _raw_spin_trylock(lp)  \
-       _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
-#define _raw_spin_lock(lock)   \
-       _do_spin_lock(lock, "spin_lock", \
-                     (unsigned long) __builtin_return_address(0))
-#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED       (rwlock_t) {0,}
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-static void inline __read_lock(rwlock_t *lock)
+static void inline __read_lock(raw_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __read_unlock(rwlock_t *lock)
+static void inline __read_unlock(raw_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __write_lock(rwlock_t *lock)
+static void inline __write_lock(raw_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2;
 
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __write_unlock(rwlock_t *lock)
+static void inline __write_unlock(raw_rwlock_t *lock)
 {
        __asm__ __volatile__(
 "      membar          #LoadStore | #StoreStore\n"
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock)
        : "memory");
 }
 
-static int inline __write_trylock(rwlock_t *lock)
+static int inline __write_trylock(raw_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2, result;
 
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock)
        return result;
 }
 
-#define _raw_read_lock(p)      __read_lock(p)
-#define _raw_read_unlock(p)    __read_unlock(p)
-#define _raw_write_lock(p)     __write_lock(p)
-#define _raw_write_unlock(p)   __write_unlock(p)
-#define _raw_write_trylock(p)  __write_trylock(p)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-       volatile unsigned long lock;
-       unsigned int writer_pc, writer_cpu;
-       unsigned int reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED       (rwlock_t) { 0, 0, 0xff, { } }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
-extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
-
-#define _raw_read_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_lock(lock, "read_lock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_unlock(lock, "read_unlock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_lock(lock, "write_lock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_unlock(lock, \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_trylock(lock) \
-({     unsigned long flags; \
-       int val; \
-       local_irq_save(flags); \
-       val = _do_write_trylock(lock, "write_trylock", \
-                               (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-       val; \
-})
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-#define read_can_lock(rw)      (!((rw)->lock & 0x80000000UL))
-#define write_can_lock(rw)     (!(rw)->lock)
+#define __raw_read_lock(p)     __read_lock(p)
+#define __raw_read_unlock(p)   __read_unlock(p)
+#define __raw_write_lock(p)    __write_lock(p)
+#define __raw_write_unlock(p)  __write_unlock(p)
+#define __raw_write_trylock(p) __write_trylock(p)
+
+#define __raw_read_trylock(lock)       generic__raw_read_trylock(lock)
+#define __raw_read_can_lock(rw)                (!((rw)->lock & 0x80000000UL))
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
new file mode 100644 (file)
index 0000000..e128112
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __SPARC64_SPINLOCK_TYPES_H
+#define __SPARC64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index bd850a2..2c192ab 100644 (file)
@@ -96,8 +96,7 @@ extern unsigned long uml_physmem;
 
 #define __va_space (8*1024*1024)
 
-extern unsigned long to_phys(void *virt);
-extern void *to_virt(unsigned long phys);
+#include "mem.h"
 
 /* Cast to unsigned long before casting to void * to avoid a warning from
  * mmap_kmem about cutting a long long down to a void *.  Not sure that
index b48e096..ed06170 100644 (file)
@@ -326,14 +326,22 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-extern phys_t page_to_phys(struct page *page);
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
 
-extern pte_t mk_pte(struct page *page, pgprot_t pgprot);
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define __virt_to_page(virt) phys_to_page(__pa(virt))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+
+#define mk_pte(page, pgprot) \
+       ({ pte_t pte;                                   \
+                                                       \
+       pte_set_val(pte, page_to_phys(page), (pgprot)); \
+       if (pte_present(pte))                           \
+               pte_mknewprot(pte_mknewpage(pte));      \
+       pte;})
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -410,8 +418,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #endif
 #endif
 
-extern struct page *phys_to_page(const unsigned long phys);
-extern struct page *__virt_to_page(const unsigned long virt);
 #define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
 
index 6c813eb..f757419 100644 (file)
@@ -8,7 +8,6 @@
 struct cpuinfo_x86; 
 struct pt_regs;
 
-extern void get_cpu_vendor(struct cpuinfo_x86*);
 extern void start_kernel(void);
 extern void pda_init(int); 
 
index 5aeb57a..6963683 100644 (file)
@@ -6,47 +6,21 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
-extern int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)      (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+               (*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "js 2f\n" \
@@ -58,74 +32,40 @@ typedef struct {
        "jmp 1b\n" \
        LOCK_SECTION_END
 
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "movb $1,%0" \
-               :"=m" (lock->lock) : : "memory"
-
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       assert_spin_locked(lock);
-#endif
-       __asm__ __volatile__(
-               spin_unlock_string
-       );
-}
-
-#else
-
-#define spin_unlock_string \
-       "xchgb %b0, %1" \
-               :"=q" (oldval), "=m" (lock->lock) \
-               :"0" (oldval) : "memory"
+               :"=m" (lock->slock) : : "memory"
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-       char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       assert_spin_locked(lock);
-#endif
        __asm__ __volatile__(
-               spin_unlock_string
-       );
+               __raw_spin_lock_string
+               :"=m" (lock->slock) : : "memory");
 }
 
-#endif
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        char oldval;
+
        __asm__ __volatile__(
                "xchgb %b0,%1"
-               :"=q" (oldval), "=m" (lock->lock)
+               :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory");
+
        return oldval > 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (lock->magic != SPINLOCK_MAGIC) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
        __asm__ __volatile__(
-               spin_lock_string
-               :"=m" (lock->lock) : : "memory");
+               __raw_spin_unlock_string
+       );
 }
 
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x)       ((int)(x)->lock > 0)
-#define write_can_lock(x)      ((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
@@ -170,29 +84,24 @@ typedef struct {
  *
  * Changed to use the same technique as rw semaphores.  See
  * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
 
-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x)         ((int)(x)->lock > 0)
+#define __raw_write_can_lock(x)                ((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
        return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
        return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+                               : "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
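
The x86-64 trylocks above are the C half of the bias scheme: the lock word
starts at RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the
whole bias so the sign bit doubles as the "contended" bit. The same counter
protocol in portable C11 atomics (arithmetic only; no spinning and no
LOCK_SECTION slow path, and the names are illustrative):

        #include <stdatomic.h>

        #define DEMO_RW_LOCK_BIAS 0x01000000

        static int demo_read_trylock(atomic_int *count)
        {
                if (atomic_fetch_sub(count, 1) - 1 >= 0)
                        return 1;                       /* still non-negative: got it */
                atomic_fetch_add(count, 1);             /* back out, as above */
                return 0;
        }

        static int demo_write_trylock(atomic_int *count)
        {
                if (atomic_fetch_sub(count, DEMO_RW_LOCK_BIAS) == DEMO_RW_LOCK_BIAS)
                        return 1;                       /* word was exactly BIAS: unlocked */
                atomic_fetch_add(count, DEMO_RW_LOCK_BIAS);
                return 0;
        }
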
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
new file mode 100644 (file)
index 0000000..59efe84
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
index cdaf03a..6e1c79c 100644 (file)
@@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio);
  * bvec_kmap_irq and bvec_kunmap_irq!!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
- * Hence the `extern inline'.
  */
-extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 {
        unsigned long addr;
 
@@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
        return (char *) addr + bvec->bv_offset;
 }
 
-extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 {
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
@@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 #define bvec_kunmap_irq(buf, flags)    do { *(flags) = 0; } while (0)
 #endif
 
-extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
                                   unsigned long *flags)
 {
        return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
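
The extern-inline-to-static-inline switch here is not cosmetic: under gcc's
traditional gnu89 semantics, `extern inline' never emits an out-of-line
body, so any call the compiler declines to inline becomes an unresolved
symbol. A two-line demonstration of the failure mode (build with
gcc -std=gnu89 -O0; linking fails with `twice' undefined):

        extern inline int twice(int x) { return 2 * x; }

        int main(void) { return twice(21) - 42; }

With static inline, the compiler emits a local copy whenever it does not
inline the call, so the same program links at any optimization level.
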
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
new file mode 100644 (file)
index 0000000..6b20af0
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+/*
+ *  bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+       /*
+        * Assuming the lock is uncontended, this never enters
+        * the body of the outer loop. If it is contended, then
+        * within the inner loop a non-atomic test is used to
+        * busywait with less bus contention for a good time to
+        * attempt to acquire the lock bit.
+        */
+       preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       while (test_and_set_bit(bitnum, addr)) {
+               while (test_bit(bitnum, addr)) {
+                       preempt_enable();
+                       cpu_relax();
+                       preempt_disable();
+               }
+       }
+#endif
+       __acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+       preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       if (test_and_set_bit(bitnum, addr)) {
+               preempt_enable();
+               return 0;
+       }
+#endif
+       __acquire(bitlock);
+       return 1;
+}
+
+/*
+ *  bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       BUG_ON(!test_bit(bitnum, addr));
+       smp_mb__before_clear_bit();
+       clear_bit(bitnum, addr);
+#endif
+       preempt_enable();
+       __release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT
+       return preempt_count();
+#else
+       return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
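
Typical use of the new header: sacrifice one bit of an existing word as a
lock instead of paying for a full spinlock_t per object (jbd's journal_head
locking is a user, hence the new include in jbd.h later in this merge). A
kernel-style sketch, where the flag word and bit number are illustrative:

        #include <linux/bit_spinlock.h>

        #define DEMO_LOCK_BIT 0         /* reserve bit 0 of 'state' as the lock */

        static unsigned long state;

        static void demo_frob(void)
        {
                bit_spin_lock(DEMO_LOCK_BIT, &state);
                /* ... modify whatever the other bits of 'state' protect ... */
                bit_spin_unlock(DEMO_LOCK_BIT, &state);
        }
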
index aefa26f..efdc9b5 100644 (file)
@@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size)
        return bits;
 }
 
-extern inline unsigned int block_size(struct block_device *bdev)
+static inline unsigned int block_size(struct block_device *bdev)
 {
        return bdev->bd_block_size;
 }
index 63035ae..a404c11 100644 (file)
@@ -96,7 +96,7 @@ struct changer_position {
  */
 struct changer_element_status {
        int             ces_type;
-       unsigned char   *ces_data;
+       unsigned char   __user *ces_data;
 };
 #define CESTATUS_FULL     0x01 /* full */
 #define CESTATUS_IMPEXP   0x02 /* media was imported (inserted by sysop) */
index e60bfda..4932ee5 100644 (file)
@@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 
 void dma_pool_destroy(struct dma_pool *pool);
 
-void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle);
+void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+                    dma_addr_t *handle);
 
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
index 7f61227..e0b77c5 100644 (file)
@@ -1509,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping,
                                    loff_t *, read_descriptor_t *, read_actor_t);
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
-       const struct iovec *iov, loff_t offset, unsigned long nr_segs);
 extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 
        unsigned long nr_segs, loff_t *ppos);
 ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, 
index 84321a4..de09726 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
+#include <linux/bit_spinlock.h>
 #include <asm/semaphore.h>
 #endif
 
index d7a2555..6acfdbb 100644 (file)
@@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void)
  */
 static inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
-#if HZ <= 1000 && !(1000 % HZ)
-       return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
-       return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
-       return (j * 1000) / HZ;
+       return (j * MSEC_PER_SEC) / HZ;
 #endif
 }
 
 static inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= 1000000 && !(1000000 % HZ)
-       return (1000000 / HZ) * j;
-#elif HZ > 1000000 && !(HZ % 1000000)
-       return (j + (HZ / 1000000) - 1)/(HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (USEC_PER_SEC / HZ) * j;
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
-       return (j * 1000000) / HZ;
+       return (j * USEC_PER_SEC) / HZ;
 #endif
 }
 
@@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
 {
        if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
-       return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
-       return m * (HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return m * (HZ / MSEC_PER_SEC);
 #else
-       return (m * HZ + 999) / 1000;
+       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
 #endif
 }
 
@@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
 {
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
-#if HZ <= 1000000 && !(1000000 % HZ)
-       return (u + (1000000 / HZ) - 1) / (1000000 / HZ);
-#elif HZ > 1000000 && !(HZ % 1000000)
-       return u * (HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return u * (HZ / USEC_PER_SEC);
 #else
-       return (u * HZ + 999999) / 1000000;
+       return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
 #endif
 }
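
The named constants make the rounding easier to audit. A standalone check of
the HZ <= MSEC_PER_SEC branch, assuming HZ=250 (so one jiffy is 4 ms):

        #include <stdio.h>

        #define HZ              250
        #define MSEC_PER_SEC    1000

        static unsigned long demo_msecs_to_jiffies(unsigned int m)
        {
                /* round up: a partial jiffy must still be waited out */
                return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
        }

        int main(void)
        {
                /* 10 ms at 250 Hz: (10 + 3) / 4 == 3 jiffies (2.5 rounded up) */
                printf("%lu\n", demo_msecs_to_jiffies(10));
                return 0;
        }
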
 
index 8081a28..9c51917 100644 (file)
@@ -24,7 +24,7 @@
 
 struct radix_tree_root {
        unsigned int            height;
-       int                     gfp_mask;
+       unsigned int            gfp_mask;
        struct radix_tree_node  *rnode;
 };
 
@@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(int gfp_mask);
+int radix_tree_preload(unsigned int __nocast gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
                        unsigned long index, int tag);
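
For context, radix_tree_preload() runs ahead of an insertion that will
happen under a spinlock, so node allocation can use GFP_KERNEL while the
insert itself stays atomic. The usual shape, with illustrative names for the
tree, its lock, and the item:

        if (radix_tree_preload(GFP_KERNEL))
                return -ENOMEM;
        spin_lock(&demo_tree_lock);
        err = radix_tree_insert(&demo_tree, index, item);
        spin_unlock(&demo_tree_lock);
        radix_tree_preload_end();
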
index 17e458e..af00b10 100644 (file)
@@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *,
                         b_blocknr_t, int for_unformatted);
 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int,
                               int);
-extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
+static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
                                             b_blocknr_t * new_blocknrs,
                                             int amount_needed)
 {
@@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
                                          0);
 }
 
-extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
                                            *th, struct inode *inode,
                                            b_blocknr_t * new_blocknrs,
                                            struct path *path, long block)
@@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
 }
 
 #ifdef REISERFS_PREALLOCATE
-extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
                                             *th, struct inode *inode,
                                             b_blocknr_t * new_blocknrs,
                                             struct path *path, long block)
index c551e6a..4b83cb2 100644 (file)
@@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void);
 #define TASK_TRACED            8
 #define EXIT_ZOMBIE            16
 #define EXIT_DEAD              32
+#define TASK_NONINTERACTIVE    64
 
 #define __set_task_state(tsk, state_value)             \
        do { (tsk)->state = (state_value); } while (0)
@@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr);
 
 #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
 struct namespace;
@@ -782,6 +785,7 @@ struct task_struct {
        short il_next;
 #endif
 #ifdef CONFIG_CPUSETS
+       short cpuset_sem_nest_depth;
        struct cpuset *cpuset;
        nodemask_t mems_allowed;
        int cpuset_mems_generation;
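
The two new schedule_timeout_*() helpers fold the usual two-step sleep idiom
into one call; presumably they amount to little more than (a sketch, not the
actual kernel/timer.c body):

        signed long schedule_timeout_interruptible(signed long timeout)
        {
                __set_current_state(TASK_INTERRUPTIBLE);
                return schedule_timeout(timeout);
        }

so that a caller's

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(HZ);

collapses to a single schedule_timeout_uninterruptible(HZ).
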
index 42a6bea..1f356f3 100644 (file)
@@ -118,7 +118,8 @@ extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmem_cache_alloc_node(kmem_cache_t *,
+                       unsigned int __nocast flags, int node);
 extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
 #else
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
index d6ba068..cdc99a2 100644 (file)
@@ -2,7 +2,48 @@
 #define __LINUX_SPINLOCK_H
 
 /*
- * include/linux/spinlock.h - generic locking declarations
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *                        initializers
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *                        implementations, mostly inline assembly code
+ *
+ *   (also included on UP-debug builds:)
+ *
+ *  linux/spinlock_api_smp.h:
+ *                        contains the prototypes for the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ *  linux/spinlock_types_up.h:
+ *                        contains the generic, simplified UP spinlock type.
+ *                        (which is an empty structure on non-debug builds)
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  linux/spinlock_up.h:
+ *                        contains the __raw_spin_*()/etc. versions for UP
+ *                        builds (which are NOPs on non-debug, non-preempt
+ *                        builds)
+ *
+ *   (included on UP-non-debug builds:)
+ *
+ *  linux/spinlock_api_up.h:
+ *                        builds the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
  */
 
 #include <linux/config.h>
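
The spin_is_locked()/spin_unlock_wait() definitions later in this hunk reach
through a ->raw_lock member, so the generic type in linux/spinlock_types.h
presumably wraps the arch type along these lines (sketch; debug fields
omitted):

        typedef struct {
                raw_spinlock_t raw_lock;
        #ifdef CONFIG_PREEMPT
                unsigned int break_lock;
        #endif
        } spinlock_t;
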
@@ -13,7 +54,6 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 
-#include <asm/processor.h>     /* for cpu relax */
 #include <asm/system.h>
 
 #define __lockfunc fastcall __attribute__((section(".spinlock.text")))
 
 /*
- * If CONFIG_SMP is set, pull in the _raw_* definitions
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
  */
-#ifdef CONFIG_SMP
-
-#define assert_spin_locked(x)  BUG_ON(!spin_is_locked(x))
-#include <asm/spinlock.h>
-
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-
-void __lockfunc _spin_lock(spinlock_t *lock)   __acquires(spinlock_t);
-void __lockfunc _read_lock(rwlock_t *lock)     __acquires(rwlock_t);
-void __lockfunc _write_lock(rwlock_t *lock)    __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t);
-void __lockfunc _read_unlock(rwlock_t *lock)   __releases(rwlock_t);
-void __lockfunc _write_unlock(rwlock_t *lock)  __releases(rwlock_t);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)  __acquires(spinlock_t);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)   __acquires(rwlock_t);
-
-void __lockfunc _spin_lock_irq(spinlock_t *lock)       __acquires(spinlock_t);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(spinlock_t);
-void __lockfunc _read_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
-void __lockfunc _read_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);
-void __lockfunc _write_lock_irq(rwlock_t *lock)                __acquires(rwlock_t);
-void __lockfunc _write_lock_bh(rwlock_t *lock)         __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)                             __releases(spinlock_t);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)                              __releases(spinlock_t);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)   __releases(rwlock_t);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)                               __releases(rwlock_t);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)                                        __releases(rwlock_t);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)  __releases(rwlock_t);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)                              __releases(rwlock_t);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)                               __releases(rwlock_t);
-
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
-int in_lock_functions(unsigned long addr);
-
-#else
+#include <linux/spinlock_types.h>
 
-#define in_lock_functions(ADDR) 0
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 
-#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
-# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-# define ATOMIC_DEC_AND_LOCK
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC 0x1D244B3C
-typedef struct {
-       unsigned long magic;
-       volatile unsigned long lock;
-       volatile unsigned int babble;
-       const char *module;
-       char *owner;
-       int oline;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
-
-#define spin_lock_init(x) \
-       do { \
-               (x)->magic = SPINLOCK_MAGIC; \
-               (x)->lock = 0; \
-               (x)->babble = 5; \
-               (x)->module = __FILE__; \
-               (x)->owner = NULL; \
-               (x)->oline = 0; \
-       } while (0)
-
-#define CHECK_LOCK(x) \
-       do { \
-               if ((x)->magic != SPINLOCK_MAGIC) { \
-                       printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
-                                       __FILE__, __LINE__, (x)); \
-               } \
-       } while(0)
-
-#define _raw_spin_lock(x)              \
-       do { \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               (x)->lock = 1; \
-               (x)->owner = __FILE__; \
-               (x)->oline = __LINE__; \
-       } while (0)
-
-/* without debugging, spin_is_locked on UP always says
- * FALSE. --> printk if already locked. */
-#define spin_is_locked(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               0; \
-       })
-
-/* with debugging, assert_spin_locked() on UP does check
- * the lock value properly */
-#define assert_spin_locked(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               BUG_ON(!(x)->lock); \
-       })
-
-/* without debugging, spin_trylock on UP always says
- * TRUE. --> printk if already locked. */
-#define _raw_spin_trylock(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               (x)->lock = 1; \
-               (x)->owner = __FILE__; \
-               (x)->oline = __LINE__; \
-               1; \
-       })
-
-#define spin_unlock_wait(x)    \
-       do { \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, (x), \
-                                       (x)->owner, (x)->oline); \
-               }\
-       } while (0)
-
-#define _raw_spin_unlock(x) \
-       do { \
-               CHECK_LOCK(x); \
-               if (!(x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
-                                       __FILE__,__LINE__, (x)->module, (x));\
-               } \
-               (x)->lock = 0; \
-       } while (0)
-#else
 /*
- * gcc versions before ~2.95 have a nasty bug with empty initializers.
+ * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
  */
-#if (__GNUC__ > 2)
-  typedef struct { } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#if defined(CONFIG_SMP)
+# include <asm/spinlock.h>
 #else
-  typedef struct { int gcc_is_buggy; } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# include <linux/spinlock_up.h>
 #endif
 
+#define spin_lock_init(lock)   do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock)      do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock)   __raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
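/*
 * A minimal usage sketch, not from this patch ("example_dev" and its
 * fields are hypothetical): the classic use of spin_unlock_wait() is
 * a teardown path that lets an in-flight lock holder drain before
 * the memory goes away.
 */
struct example_dev {
	spinlock_t	lock;
	int		dying;
};

static void example_teardown(struct example_dev *dev)
{
	dev->dying = 1;			/* stop new acquisitions */
	spin_unlock_wait(&dev->lock);	/* let the current holder finish */
	kfree(dev);
}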
+
 /*
- * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  */
-#define spin_lock_init(lock)   do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock)   do { (void)(lock); } while(0)
-#define spin_is_locked(lock)   ((void)(lock), 0)
-#define assert_spin_locked(lock)       do { (void)(lock); } while(0)
-#define _raw_spin_trylock(lock)        (((void)(lock), 1))
-#define spin_unlock_wait(lock) (void)(lock)
-#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-/* RW spinlocks: No debug version */
-
-#if (__GNUC__ > 2)
-  typedef struct { } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
 #else
-  typedef struct { int gcc_is_buggy; } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+# include <linux/spinlock_api_up.h>
 #endif
 
-#define rwlock_init(lock)      do { (void)(lock); } while(0)
-#define _raw_read_lock(lock)   do { (void)(lock); } while(0)
-#define _raw_read_unlock(lock) do { (void)(lock); } while(0)
-#define _raw_write_lock(lock)  do { (void)(lock); } while(0)
-#define _raw_write_unlock(lock)        do { (void)(lock); } while(0)
-#define read_can_lock(lock)    (((void)(lock), 1))
-#define write_can_lock(lock)   (((void)(lock), 1))
-#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
-#define _raw_write_trylock(lock) ({ (void)(lock); (1); })
-
-#define _spin_trylock(lock)    ({preempt_disable(); _raw_spin_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _read_trylock(lock)    ({preempt_disable();_raw_read_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _write_trylock(lock)   ({preempt_disable(); _raw_write_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \
-                               _raw_spin_trylock(lock) ? \
-                               1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
-
-#define _spin_lock(lock)       \
-do { \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while(0)
-
-#define _write_lock(lock) \
-do { \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while(0)
-#define _read_lock(lock)       \
-do { \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while(0)
-
-#define _spin_unlock(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock(lock) \
-do { \
-       _raw_write_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while(0)
-
-#define _read_unlock(lock) \
-do { \
-       _raw_read_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while(0)
-
-#define _spin_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _write_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock)                __raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock)       __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)          __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+               __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock)                __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock)     __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock)    __raw_write_trylock(&(rwlock)->raw_lock)
+#endif
 
-#define _write_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _write_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_spin_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _spin_unlock_irq(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       local_irq_enable(); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _spin_unlock_bh(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       preempt_enable_no_resched(); \
-       local_bh_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_bh(lock) \
-do { \
-       _raw_write_unlock(lock); \
-       preempt_enable_no_resched(); \
-       local_bh_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_read_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_write_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_irq(lock) \
-do { \
-       _raw_read_unlock(lock); \
-       local_irq_enable();     \
-       preempt_enable();       \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_bh(lock)  \
-do { \
-       _raw_read_unlock(lock); \
-       preempt_enable_no_resched();    \
-       local_bh_enable();      \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_irq(lock)        \
-do { \
-       _raw_write_unlock(lock);        \
-       local_irq_enable();     \
-       preempt_enable();       \
-       __release(lock); \
-} while (0)
-
-#endif /* !SMP */
+#define read_can_lock(rwlock)          __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)         __raw_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various spin_lock and rw_lock methods.  Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
-#define spin_trylock(lock)     __cond_lock(_spin_trylock(lock))
-#define read_trylock(lock)     __cond_lock(_read_trylock(lock))
-#define write_trylock(lock)    __cond_lock(_write_trylock(lock))
+#define spin_trylock(lock)             __cond_lock(_spin_trylock(lock))
+#define read_trylock(lock)             __cond_lock(_read_trylock(lock))
+#define write_trylock(lock)            __cond_lock(_write_trylock(lock))
 
-#define spin_lock(lock)                _spin_lock(lock)
-#define write_lock(lock)       _write_lock(lock)
-#define read_lock(lock)                _read_lock(lock)
+#define spin_lock(lock)                        _spin_lock(lock)
+#define write_lock(lock)               _write_lock(lock)
+#define read_lock(lock)                        _read_lock(lock)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock)
 #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock)
 #define write_lock_irqsave(lock, flags)        flags = _write_lock_irqsave(lock)
@@ -470,137 +171,59 @@ do { \
 #define write_lock_irq(lock)           _write_lock_irq(lock)
 #define write_lock_bh(lock)            _write_lock_bh(lock)
 
-#define spin_unlock(lock)      _spin_unlock(lock)
-#define write_unlock(lock)     _write_unlock(lock)
-#define read_unlock(lock)      _read_unlock(lock)
+#define spin_unlock(lock)              _spin_unlock(lock)
+#define write_unlock(lock)             _write_unlock(lock)
+#define read_unlock(lock)              _read_unlock(lock)
 
-#define spin_unlock_irqrestore(lock, flags)    _spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_irqrestore(lock, flags) \
+                                       _spin_unlock_irqrestore(lock, flags)
 #define spin_unlock_irq(lock)          _spin_unlock_irq(lock)
 #define spin_unlock_bh(lock)           _spin_unlock_bh(lock)
 
-#define read_unlock_irqrestore(lock, flags)    _read_unlock_irqrestore(lock, flags)
-#define read_unlock_irq(lock)                  _read_unlock_irq(lock)
-#define read_unlock_bh(lock)                   _read_unlock_bh(lock)
+#define read_unlock_irqrestore(lock, flags) \
+                                       _read_unlock_irqrestore(lock, flags)
+#define read_unlock_irq(lock)          _read_unlock_irq(lock)
+#define read_unlock_bh(lock)           _read_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags)   _write_unlock_irqrestore(lock, flags)
-#define write_unlock_irq(lock)                 _write_unlock_irq(lock)
-#define write_unlock_bh(lock)                  _write_unlock_bh(lock)
+#define write_unlock_irqrestore(lock, flags) \
+                                       _write_unlock_irqrestore(lock, flags)
+#define write_unlock_irq(lock)         _write_unlock_irq(lock)
+#define write_unlock_bh(lock)          _write_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)                  __cond_lock(_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock)          __cond_lock(_spin_trylock_bh(lock))
 
 #define spin_trylock_irq(lock) \
 ({ \
        local_irq_disable(); \
        _spin_trylock(lock) ? \
-       1 : ({local_irq_enable(); 0; }); \
+       1 : ({ local_irq_enable(); 0; }); \
 })
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
        local_irq_save(flags); \
        _spin_trylock(lock) ? \
-       1 : ({local_irq_restore(flags); 0;}); \
+       1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#ifdef CONFIG_LOCKMETER
-extern void _metered_spin_lock   (spinlock_t *lock);
-extern void _metered_spin_unlock (spinlock_t *lock);
-extern int  _metered_spin_trylock(spinlock_t *lock);
-extern void _metered_read_lock    (rwlock_t *lock);
-extern void _metered_read_unlock  (rwlock_t *lock);
-extern void _metered_write_lock   (rwlock_t *lock);
-extern void _metered_write_unlock (rwlock_t *lock);
-extern int  _metered_read_trylock (rwlock_t *lock);
-extern int  _metered_write_trylock(rwlock_t *lock);
-#endif
-
-/* "lock on reference count zero" */
-#ifndef ATOMIC_DEC_AND_LOCK
-#include <asm/atomic.h>
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#endif
-
-#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
-
-/*
- *  bit-based spin_lock()
- *
- * Don't use this unless you really need to: spin_lock() and spin_unlock()
- * are significantly faster.
- */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
-{
-       /*
-        * Assuming the lock is uncontended, this never enters
-        * the body of the outer loop. If it is contended, then
-        * within the inner loop a non-atomic test is used to
-        * busywait with less bus contention for a good time to
-        * attempt to acquire the lock bit.
-        */
-       preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       while (test_and_set_bit(bitnum, addr)) {
-               while (test_bit(bitnum, addr)) {
-                       preempt_enable();
-                       cpu_relax();
-                       preempt_disable();
-               }
-       }
-#endif
-       __acquire(bitlock);
-}
-
 /*
- * Return true if it was acquired
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs the above definitions)
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
-{
-       preempt_disable();      
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       if (test_and_set_bit(bitnum, addr)) {
-               preempt_enable();
-               return 0;
-       }
-#endif
-       __acquire(bitlock);
-       return 1;
-}
-
-/*
- *  bit-based spin_unlock()
- */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       BUG_ON(!test_bit(bitnum, addr));
-       smp_mb__before_clear_bit();
-       clear_bit(bitnum, addr);
-#endif
-       preempt_enable();
-       __release(bitlock);
-}
-
-/*
- * Return true if the lock is held.
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
  */
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
-       return preempt_count();
-#else
-       return 1;
-#endif
-}
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+               __cond_lock(_atomic_dec_and_lock(atomic, lock))
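/*
 * A minimal usage sketch, not from this patch ("example_obj" and the
 * list lock are hypothetical): atomic_dec_and_lock() takes the lock
 * only on the final reference drop, so the common non-final put
 * stays lock-free.
 */
struct example_obj {
	atomic_t		refcount;
	struct list_head	node;
};

static DEFINE_SPINLOCK(example_list_lock);

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_lock(&obj->refcount, &example_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&example_list_lock);
		kfree(obj);
	}
}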
 
 /**
  * spin_can_lock - would spin_trylock() succeed?
  * @lock: the spinlock in question.
  */
-#define spin_can_lock(lock)            (!spin_is_locked(lock))
+#define spin_can_lock(lock)    (!spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
new file mode 100644 (file)
index 0000000..78e6989
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x)  BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock)           __acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock)             __acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock)            __acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock)         __acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock)       __acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock)                __acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+                                                       __acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+                                                       __acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+                                                       __acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock)         __releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock)           __releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock)          __releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)      __releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock)                __releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock)       __releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)     __releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock)       __releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock)      __releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+                                                       __releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+                                                       __releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+                                                       __releases(rwlock_t);
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
new file mode 100644 (file)
index 0000000..cd81cee
--- /dev/null
@@ -0,0 +1,80 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation for UP-nondebug (inlined)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR)                0
+
+#define assert_spin_locked(lock)       do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to suppress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define __LOCK(lock) \
+  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+  do { local_bh_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+  do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+  do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __UNLOCK(lock) \
+  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+  do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _spin_lock(lock)                       __LOCK(lock)
+#define _read_lock(lock)                       __LOCK(lock)
+#define _write_lock(lock)                      __LOCK(lock)
+#define _spin_lock_bh(lock)                    __LOCK_BH(lock)
+#define _read_lock_bh(lock)                    __LOCK_BH(lock)
+#define _write_lock_bh(lock)                   __LOCK_BH(lock)
+#define _spin_lock_irq(lock)                   __LOCK_IRQ(lock)
+#define _read_lock_irq(lock)                   __LOCK_IRQ(lock)
+#define _write_lock_irq(lock)                  __LOCK_IRQ(lock)
+#define _spin_lock_irqsave(lock, flags)                __LOCK_IRQSAVE(lock, flags)
+#define _read_lock_irqsave(lock, flags)                __LOCK_IRQSAVE(lock, flags)
+#define _write_lock_irqsave(lock, flags)       __LOCK_IRQSAVE(lock, flags)
+#define _spin_trylock(lock)                    ({ __LOCK(lock); 1; })
+#define _read_trylock(lock)                    ({ __LOCK(lock); 1; })
+#define _write_trylock(lock)                   ({ __LOCK(lock); 1; })
+#define _spin_trylock_bh(lock)                 ({ __LOCK_BH(lock); 1; })
+#define _spin_unlock(lock)                     __UNLOCK(lock)
+#define _read_unlock(lock)                     __UNLOCK(lock)
+#define _write_unlock(lock)                    __UNLOCK(lock)
+#define _spin_unlock_bh(lock)                  __UNLOCK_BH(lock)
+#define _write_unlock_bh(lock)                 __UNLOCK_BH(lock)
+#define _read_unlock_bh(lock)                  __UNLOCK_BH(lock)
+#define _spin_unlock_irq(lock)                 __UNLOCK_IRQ(lock)
+#define _read_unlock_irq(lock)                 __UNLOCK_IRQ(lock)
+#define _write_unlock_irq(lock)                        __UNLOCK_IRQ(lock)
+#define _spin_unlock_irqrestore(lock, flags)   __UNLOCK_IRQRESTORE(lock, flags)
+#define _read_unlock_irqrestore(lock, flags)   __UNLOCK_IRQRESTORE(lock, flags)
+#define _write_unlock_irqrestore(lock, flags)  __UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
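For concreteness, a hand expansion (illustrative, not compiler output) of _spin_lock_irqsave(&lock, flags) through the macros above; on UP-nondebug nothing remains but the irq/preempt bookkeeping, the checker annotation, and the unused-variable suppression:

	local_irq_save(flags);		/* from __LOCK_IRQSAVE */
	preempt_disable();		/* from __LOCK */
	__acquire(lock);		/* static checker annotation only */
	(void)(lock);			/* suppresses "unused variable" warnings */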
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
new file mode 100644 (file)
index 0000000..9cb51e0
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ *                                  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+typedef struct {
+       raw_spinlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+       unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC         0xdead4ead
+
+typedef struct {
+       raw_rwlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+       unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC           0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT    ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED                                            \
+       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,   \
+                               .magic = SPINLOCK_MAGIC,                \
+                               .owner = SPINLOCK_OWNER_INIT,           \
+                               .owner_cpu = -1 }
+# define RW_LOCK_UNLOCKED                                              \
+       (rwlock_t)      {       .raw_lock = __RAW_RW_LOCK_UNLOCKED,     \
+                               .magic = RWLOCK_MAGIC,                  \
+                               .owner = SPINLOCK_OWNER_INIT,           \
+                               .owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+# define RW_LOCK_UNLOCKED \
+       (rwlock_t)      {       .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x)     spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x)       rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
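A brief usage sketch under the unified initializers (the context structure is hypothetical, not from this patch); both forms now behave identically across SMP, UP and debug configurations:

	static DEFINE_SPINLOCK(example_lock);	/* static, compile-time init */

	struct example_ctx {
		spinlock_t lock;
	};

	static void example_ctx_init(struct example_ctx *ctx)
	{
		spin_lock_init(&ctx->lock);	/* dynamic, runtime init */
	}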
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
new file mode 100644 (file)
index 0000000..def2d17
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+/*
+ * All gcc 2.95 versions and early versions of 2.96 have a nasty bug
+ * with empty initializers.
+ */
+#if (__GNUC__ > 2)
+typedef struct { } raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_spinlock_t;
+#define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 }
+#endif
+
+#endif
+
+#if (__GNUC__ > 2)
+typedef struct {
+       /* no debug version on UP */
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_rwlock_t;
+#define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 }
+#endif
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
new file mode 100644 (file)
index 0000000..31accf2
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_up.h - UP version of spinlocks (debug and non-debug).
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define __raw_spin_is_locked(x)                ((x)->slock == 0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+       lock->slock = 0;
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+       local_irq_save(flags);
+       lock->slock = 0;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+       char oldval = lock->slock;
+
+       lock->slock = 0;
+
+       return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+       lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define __raw_read_lock(lock)          do { (void)(lock); } while (0)
+#define __raw_write_lock(lock)         do { (void)(lock); } while (0)
+#define __raw_read_trylock(lock)       ({ (void)(lock); 1; })
+#define __raw_write_trylock(lock)      ({ (void)(lock); 1; })
+#define __raw_read_unlock(lock)                do { (void)(lock); } while (0)
+#define __raw_write_unlock(lock)       do { (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+# define __raw_spin_is_locked(lock)    ((void)(lock), 0)
+/* for sched.c and kernel_lock.c: */
+# define __raw_spin_lock(lock)         do { (void)(lock); } while (0)
+# define __raw_spin_unlock(lock)       do { (void)(lock); } while (0)
+# define __raw_spin_trylock(lock)      ({ (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define __raw_read_can_lock(lock)      (((void)(lock), 1))
+#define __raw_write_can_lock(lock)     (((void)(lock), 1))
+
+#define __raw_spin_unlock_wait(lock) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
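Why the inverted encoding pays off, as a sketch (the variable is hypothetical): a zero-filled lock that never went through initialization reads as already locked, so the mistake is visible rather than silently accepted:

	static raw_spinlock_t forgotten;	/* BSS is zeroed: slock == 0 */

	/*
	 * Under CONFIG_DEBUG_SPINLOCK on UP, __raw_spin_is_locked(&forgotten)
	 * is true and __raw_spin_trylock(&forgotten) returns 0 (failure), so
	 * the uninitialized lock misbehaves loudly instead of appearing
	 * unlocked, as a 0-means-unlocked encoding would.
	 */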
index c10d4c2..8e83f4e 100644 (file)
@@ -28,17 +28,10 @@ struct timezone {
 #ifdef __KERNEL__
 
 /* Parameters used to convert the timespec values */
-#ifndef USEC_PER_SEC
+#define MSEC_PER_SEC (1000L)
 #define USEC_PER_SEC (1000000L)
-#endif
-
-#ifndef NSEC_PER_SEC
 #define NSEC_PER_SEC (1000000000L)
-#endif
-
-#ifndef NSEC_PER_USEC
 #define NSEC_PER_USEC (1000L)
-#endif
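/*
 * An illustrative helper, not part of this patch, showing the
 * now-unconditional constants in use for a unit conversion
 * (overflow for very large tv_sec is ignored in this sketch):
 */
static __inline__ long example_timespec_to_usecs(const struct timespec *ts)
{
	return ts->tv_sec * USEC_PER_SEC + ts->tv_nsec / NSEC_PER_USEC;
}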
 
 static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) 
 { 
index 542dbae..343d883 100644 (file)
@@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int sync_page_range(struct inode *inode, struct address_space *mapping,
                        loff_t pos, size_t count);
-int sync_page_range_nolock(struct inode *inode, struct address_space
-               *mapping, loff_t pos, size_t count);
 
 /* pdflush.c */
 extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
index 0acf245..3a92601 100644 (file)
@@ -69,7 +69,7 @@ struct mqueue_inode_info {
 
        struct sigevent notify;
        pid_t notify_owner;
-       struct user_struct *user;       /* user who created, for accouting */
+       struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;
 
index 8d57a2f..ff4dc02 100644 (file)
@@ -12,6 +12,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
index f70e602..b756f52 100644 (file)
@@ -165,7 +165,7 @@ out:
 }
 
 /*
- * Close the old accouting file (if currently open) and then replace
+ * Close the old accounting file (if currently open) and then replace
  * it with file (if non-NULL).
  *
  * NOTE: acct_globals.lock MUST be held on entry and exit.
@@ -199,11 +199,16 @@ static void acct_file_reopen(struct file *file)
        }
 }
 
-/*
- *  sys_acct() is the only system call needed to implement process
- *  accounting. It takes the name of the file where accounting records
- *  should be written. If the filename is NULL, accounting will be
- *  shutdown.
+/**
+ * sys_acct - enable/disable process accounting
+ * @name: file name for accounting records or NULL to shut down accounting
+ *
+ * Returns 0 for success or negative errno values for failure.
+ *
+ * sys_acct() is the only system call needed to implement process
+ * accounting. It takes the name of the file where accounting records
+ * should be written. If the filename is NULL, accounting will be
+ * shut down.
  */
 asmlinkage long sys_acct(const char __user *name)
 {
@@ -250,9 +255,12 @@ asmlinkage long sys_acct(const char __user *name)
        return (0);
 }
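/*
 * The calling convention documented above, seen from userspace via
 * the libc wrapper (an illustrative sketch; the path is an example,
 * not from this patch):
 *
 *	acct("/var/log/pacct");		enable accounting to that file
 *	acct(NULL);			shut accounting down
 *
 * The wrapper returns 0 on success and -1 with errno set on failure.
 */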
 
-/*
- * If the accouting is turned on for a file in the filesystem pointed
- * to by sb, turn accouting off.
+/**
+ * acct_auto_close - turn off a filesystem's accounting if it is on
+ * @sb: super block for the filesystem
+ *
+ * If the accounting is turned on for a file in the filesystem pointed
+ * to by sb, turn accounting off.
  */
 void acct_auto_close(struct super_block *sb)
 {
@@ -503,8 +511,11 @@ static void do_acct_process(long exitcode, struct file *file)
        set_fs(fs);
 }
 
-/*
+/**
  * acct_process - now just a wrapper around do_acct_process
+ * @exitcode: task exit code
+ *
+ * Handles process accounting for an exiting task.
  */
 void acct_process(long exitcode)
 {
@@ -530,9 +541,9 @@ void acct_process(long exitcode)
 }
 
 
-/*
- * acct_update_integrals
- *    -  update mm integral fields in task_struct
+/**
+ * acct_update_integrals - update mm integral fields in task_struct
+ * @tsk: task_struct for accounting
  */
 void acct_update_integrals(struct task_struct *tsk)
 {
@@ -547,9 +558,9 @@ void acct_update_integrals(struct task_struct *tsk)
        }
 }
 
-/*
- * acct_clear_integrals
- *    - clear the mm integral fields in task_struct
+/**
+ * acct_clear_integrals - clear the mm integral fields in task_struct
+ * @tsk: task_struct whose accounting fields are cleared
  */
 void acct_clear_integrals(struct task_struct *tsk)
 {
index ddfcaaa..102296e 100644 (file)
@@ -48,8 +48,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
        if (!time_after(expire, now))
                return 0;
 
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
+       expire = schedule_timeout_interruptible(expire - now);
        if (expire == 0)
                return 0;
 
@@ -82,8 +81,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                return -EINVAL;
 
        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
+       expire = schedule_timeout_interruptible(expire);
        if (expire == 0)
                return 0;
 
@@ -795,8 +793,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &s, &info);
index 712d020..407b5f0 100644 (file)
@@ -181,6 +181,37 @@ static struct super_block *cpuset_sb = NULL;
 
 static DECLARE_MUTEX(cpuset_sem);
 
+/*
+ * The global cpuset semaphore cpuset_sem can be needed by the
+ * memory allocator to update a task's mems_allowed (see the calls
+ * to cpuset_update_current_mems_allowed()) or to walk up the
+ * cpuset hierarchy to find a mem_exclusive cpuset (see the calls
+ * to cpuset_excl_nodes_overlap()).
+ *
+ * But if the memory allocation is being done by cpuset.c code, it
+ * usually already holds cpuset_sem.  Double tripping on a kernel
+ * semaphore deadlocks the current task, and any other task that
+ * subsequently tries to obtain the lock.
+ *
+ * Run all up() and down() calls on cpuset_sem through the following
+ * wrappers, which will detect this nested locking, and avoid
+ * deadlocking.
+ */
+
+static inline void cpuset_down(struct semaphore *psem)
+{
+       if (current->cpuset_sem_nest_depth == 0)
+               down(psem);
+       current->cpuset_sem_nest_depth++;
+}
+
+static inline void cpuset_up(struct semaphore *psem)
+{
+       current->cpuset_sem_nest_depth--;
+       if (current->cpuset_sem_nest_depth == 0)
+               up(psem);
+}
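/*
 * A sketch of the nesting these wrappers tolerate (call chain
 * simplified for illustration, not code from this patch):
 *
 *   cpuset_down(&cpuset_sem);         depth 0 -> 1, down() taken
 *     kmalloc(..., GFP_KERNEL);
 *       cpuset_update_current_mems_allowed()
 *         cpuset_down(&cpuset_sem);   depth 1 -> 2, down() skipped
 *         ...
 *         cpuset_up(&cpuset_sem);     depth 2 -> 1, up() skipped
 *   cpuset_up(&cpuset_sem);           depth 1 -> 0, up() released
 *
 * A plain down() at the inner step would deadlock on the semaphore
 * the task already holds.
 */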
+
 /*
  * A couple of forward declarations required, due to cyclic reference loop:
  *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
@@ -522,19 +553,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  * Refresh current tasks mems_allowed and mems_generation from
  * current tasks cpuset.  Call with cpuset_sem held.
  *
- * Be sure to call refresh_mems() on any cpuset operation which
- * (1) holds cpuset_sem, and (2) might possibly alloc memory.
- * Call after obtaining cpuset_sem lock, before any possible
- * allocation.  Otherwise one risks trying to allocate memory
- * while the task cpuset_mems_generation is not the same as
- * the mems_generation in its cpuset, which would deadlock on
- * cpuset_sem in cpuset_update_current_mems_allowed().
- *
- * Since we hold cpuset_sem, once refresh_mems() is called, the
- * test (current->cpuset_mems_generation != cs->mems_generation)
- * in cpuset_update_current_mems_allowed() will remain false,
- * until we drop cpuset_sem.  Anyone else who would change our
- * cpusets mems_generation needs to lock cpuset_sem first.
+ * This routine is needed to update the per-task mems_allowed
+ * data, within the task's context, when it is trying to allocate
+ * memory (in various mm/mempolicy.c routines) and notices
+ * that some other task has been modifying its cpuset.
  */
 
 static void refresh_mems(void)
@@ -840,7 +862,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
        }
        buffer[nbytes] = 0;     /* nul-terminate */
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
 
        if (is_removed(cs)) {
                retval = -ENODEV;
@@ -874,7 +896,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
        if (retval == 0)
                retval = nbytes;
 out2:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        cpuset_release_agent(pathbuf);
 out1:
        kfree(buffer);
@@ -914,9 +936,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 {
        cpumask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        mask = cs->cpus_allowed;
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return cpulist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -925,9 +947,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
        nodemask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        mask = cs->mems_allowed;
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return nodelist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -1334,8 +1356,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
        if (!cs)
                return -ENOMEM;
 
-       down(&cpuset_sem);
-       refresh_mems();
+       cpuset_down(&cpuset_sem);
        cs->flags = 0;
        if (notify_on_release(parent))
                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1360,14 +1381,14 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
         * will down() this new directory's i_sem and if we race with
         * another mkdir, we might deadlock.
         */
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        err = cpuset_populate_dir(cs->dentry);
        /* If err < 0, we have a half-filled directory - oh well ;) */
        return 0;
 err:
        list_del(&cs->sibling);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        kfree(cs);
        return err;
 }
@@ -1389,14 +1410,13 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
        /* the vfs holds both inode->i_sem already */
 
-       down(&cpuset_sem);
-       refresh_mems();
+       cpuset_down(&cpuset_sem);
        if (atomic_read(&cs->count) > 0) {
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                return -EBUSY;
        }
        if (!list_empty(&cs->children)) {
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                return -EBUSY;
        }
        parent = cs->parent;
@@ -1412,7 +1432,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
        spin_unlock(&d->d_lock);
        cpuset_d_remove_dir(d);
        dput(d);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        cpuset_release_agent(pathbuf);
        return 0;
 }
@@ -1515,10 +1535,10 @@ void cpuset_exit(struct task_struct *tsk)
        if (notify_on_release(cs)) {
                char *pathbuf = NULL;
 
-               down(&cpuset_sem);
+               cpuset_down(&cpuset_sem);
                if (atomic_dec_and_test(&cs->count))
                        check_for_release(cs, &pathbuf);
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                cpuset_release_agent(pathbuf);
        } else {
                atomic_dec(&cs->count);
@@ -1539,11 +1559,11 @@ cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
 {
        cpumask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        task_lock((struct task_struct *)tsk);
        guarantee_online_cpus(tsk->cpuset, &mask);
        task_unlock((struct task_struct *)tsk);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return mask;
 }
@@ -1568,9 +1588,9 @@ void cpuset_update_current_mems_allowed(void)
        if (!cs)
                return;         /* task is exiting */
        if (current->cpuset_mems_generation != cs->mems_generation) {
-               down(&cpuset_sem);
+               cpuset_down(&cpuset_sem);
                refresh_mems();
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
        }
 }
 
@@ -1669,14 +1689,14 @@ int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
                return 0;
 
        /* Not hardwall and node outside mems_allowed: scan up cpusets */
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        cs = current->cpuset;
        if (!cs)
                goto done;              /* current task exiting */
        cs = nearest_exclusive_ancestor(cs);
        allowed = node_isset(node, cs->mems_allowed);
 done:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        return allowed;
 }
 
@@ -1697,7 +1717,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p)
        const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
        int overlap = 0;                /* do cpusets overlap? */
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        cs1 = current->cpuset;
        if (!cs1)
                goto done;              /* current task exiting */
@@ -1708,7 +1728,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p)
        cs2 = nearest_exclusive_ancestor(cs2);
        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
 done:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return overlap;
 }
@@ -1731,7 +1751,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
                return -ENOMEM;
 
        tsk = m->private;
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        task_lock(tsk);
        cs = tsk->cpuset;
        task_unlock(tsk);
@@ -1746,7 +1766,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
        seq_puts(m, buf);
        seq_putc(m, '\n');
 out:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        kfree(buf);
        return retval;
 }
index 2632b81..dbd4490 100644 (file)
@@ -875,7 +875,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t * p)
+void wait_task_inactive(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -966,8 +966,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                int local_group;
                int i;
 
+               /* Skip over this group if it has no CPUs allowed */
+               if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+                       goto nextgroup;
+
                local_group = cpu_isset(this_cpu, group->cpumask);
-               /* XXX: put a cpus allowed check */
 
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
@@ -992,6 +995,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                        min_load = avg_load;
                        idlest = group;
                }
+nextgroup:
                group = group->next;
        } while (group != sd->groups);
 
@@ -1003,13 +1007,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
+       cpumask_t tmp;
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;
 
-       for_each_cpu_mask(i, group->cpumask) {
+       /* Traverse only the allowed CPUs */
+       cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+       for_each_cpu_mask(i, tmp) {
                load = source_load(i, 0);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1052,7 +1061,7 @@ static int sched_balance_self(int cpu, int flag)
                if (!group)
                        goto nextlevel;
 
-               new_cpu = find_idlest_cpu(group, cpu);
+               new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu)
                        goto nextlevel;
 
@@ -1127,7 +1136,7 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 {
        int cpu, this_cpu, success = 0;
        unsigned long flags;
@@ -1251,6 +1260,16 @@ out_activate:
                p->activated = -1;
        }
 
+       /*
+        * Tasks that have marked their sleep as noninteractive get
+        * woken up without updating their sleep average. (i.e. their
+        * sleep is handled in a priority-neutral manner, no priority
+        * boost and no penalty.)
+        */
+       if (old_state & TASK_NONINTERACTIVE)
+               __activate_task(p, rq);
+       else
+               activate_task(p, rq, cpu == this_cpu);
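	/*
	 * Sleeper-side sketch (illustrative, not from this patch): a
	 * task that wants its sleep treated as priority-neutral tags
	 * the state it sleeps in, and the wakeup above then takes the
	 * __activate_task() path:
	 *
	 *	set_current_state(TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
	 *	schedule();
	 */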
        /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
@@ -1259,7 +1278,6 @@ out_activate:
         * the waker guarantees that the freshly woken up task is going
         * to be considered on this CPU.)
         */
-       activate_task(p, rq, cpu == this_cpu);
        if (!sync || cpu != this_cpu) {
                if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
@@ -1274,7 +1292,7 @@ out:
        return success;
 }
 
-int fastcall wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t *p)
 {
        return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                                 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -1353,7 +1371,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
        unsigned long flags;
        int this_cpu, cpu;
@@ -1436,7 +1454,7 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t * p)
+void fastcall sched_exit(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -1511,6 +1529,10 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
         *              Manfred Spraul <manfred@colorfullife.com>
         */
        prev_task_flags = prev->flags;
+#ifdef CONFIG_DEBUG_SPINLOCK
+       /* this is a valid case when another task releases the spinlock */
+       rq->lock.owner = current;
+#endif
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
        if (mm)
@@ -1753,7 +1775,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  */
 static inline
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-            struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+                    struct sched_domain *sd, enum idle_type idle,
+                    int *all_pinned)
 {
        /*
         * We do not migrate tasks that are:
@@ -1883,10 +1906,11 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-                  unsigned long *imbalance, enum idle_type idle)
+                  unsigned long *imbalance, enum idle_type idle, int *sd_idle)
 {
        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+       unsigned long max_pull;
        int load_idx;
 
        max_load = this_load = total_load = total_pwr = 0;
@@ -1908,6 +1932,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                avg_load = 0;
 
                for_each_cpu_mask(i, group->cpumask) {
+                       if (*sd_idle && !idle_cpu(i))
+                               *sd_idle = 0;
+
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = target_load(i, load_idx);
@@ -1933,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                group = group->next;
        } while (group != sd->groups);
 
-       if (!busiest || this_load >= max_load)
+       if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
                goto out_balanced;
 
        avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1953,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * by pulling tasks to us.  Be careful of negative numbers as they'll
         * appear as very large values with unsigned longs.
         */
+
+       /* Don't want to pull so many tasks that a group would go idle */
+       max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min((max_load - avg_load) * busiest->cpu_power,
+       *imbalance = min(max_pull * busiest->cpu_power,
                                (avg_load - this_load) * this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
@@ -2051,11 +2082,14 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
        unsigned long imbalance;
        int nr_moved, all_pinned = 0;
        int active_balance = 0;
+       int sd_idle = 0;
+
+       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
-       spin_lock(&this_rq->lock);
        schedstat_inc(sd, lb_cnt[idle]);
 
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle);
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);
                goto out_balanced;
@@ -2079,19 +2113,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                 * still unbalanced. nr_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
-               double_lock_balance(this_rq, busiest);
+               double_rq_lock(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                               imbalance, sd, idle,
-                                               &all_pinned);
-               spin_unlock(&busiest->lock);
+                                       imbalance, sd, idle, &all_pinned);
+               double_rq_unlock(this_rq, busiest);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(all_pinned))
                        goto out_balanced;
        }
 
-       spin_unlock(&this_rq->lock);
-
        if (!nr_moved) {
                schedstat_inc(sd, lb_failed[idle]);
                sd->nr_balance_failed++;
@@ -2099,6 +2130,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
                        spin_lock(&busiest->lock);
+
+                       /* Don't kick the migration_thread if the curr
+                        * task on the busiest cpu can't be moved to this_cpu
+                        */
+                       if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+                               spin_unlock(&busiest->lock);
+                               all_pinned = 1;
+                               goto out_one_pinned;
+                       }
+
                        if (!busiest->active_balance) {
                                busiest->active_balance = 1;
                                busiest->push_cpu = this_cpu;
@@ -2131,19 +2172,23 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                        sd->balance_interval *= 2;
        }
 
+       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return nr_moved;
 
 out_balanced:
-       spin_unlock(&this_rq->lock);
-
        schedstat_inc(sd, lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
+
+out_one_pinned:
        /* tune up the balancing interval */
        if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return 0;
 }
 
@@ -2161,9 +2206,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
        runqueue_t *busiest = NULL;
        unsigned long imbalance;
        int nr_moved = 0;
+       int sd_idle = 0;
+
+       if (sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
+       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
                goto out_balanced;
@@ -2177,22 +2226,30 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
        BUG_ON(busiest == this_rq);
 
-       /* Attempt to move tasks */
-       double_lock_balance(this_rq, busiest);
-
        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-       nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+       nr_moved = 0;
+       if (busiest->nr_running > 1) {
+               /* Attempt to move tasks */
+               double_lock_balance(this_rq, busiest);
+               nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, NEWLY_IDLE, NULL);
-       if (!nr_moved)
+               spin_unlock(&busiest->lock);
+       }
+
+       if (!nr_moved) {
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-       else
+               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+                       return -1;
+       } else
                sd->nr_balance_failed = 0;
 
-       spin_unlock(&busiest->lock);
        return nr_moved;
 
 out_balanced:
        schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        sd->nr_balance_failed = 0;
        return 0;
 }
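Both balancing paths above now share a three-way return convention, and callers deliberately test only for a nonzero result. A minimal sketch of a caller honouring it, using the same names as the patch (this mirrors the rebalance_tick() hunk immediately below; the surrounding harness is hypothetical):

/*
 * load_balance() return convention after this patch:
 *   > 0  pulled that many tasks over to this runqueue
 *     0  the domain was balanced (or every task was pinned)
 *   < 0  pulled nothing, but an SMT sibling is busy, so this
 *        CPU must not be declared idle
 */
int ret = load_balance(this_cpu, this_rq, sd, idle);
if (ret)                        /* pulled tasks, or busy sibling */
        idle = NOT_IDLE;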
@@ -2317,7 +2374,11 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 
                if (j - sd->last_balance >= interval) {
                        if (load_balance(this_cpu, this_rq, sd, idle)) {
-                               /* We've pulled tasks over so no longer idle */
+                               /*
+                                * We've pulled tasks over so either we're no
+                                * longer idle, or one of our SMT siblings is
+                                * not idle.
+                                */
                                idle = NOT_IDLE;
                        }
                        sd->last_balance += interval;
@@ -2576,6 +2637,13 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
+static inline void wakeup_busy_runqueue(runqueue_t *rq)
+{
+       /* If an SMT runqueue is sleeping due to priority reasons, wake it up */
+       if (rq->curr == rq->idle && rq->nr_running)
+               resched_task(rq->idle);
+}
+
 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
@@ -2609,12 +2677,7 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
        for_each_cpu_mask(i, sibling_map) {
                runqueue_t *smt_rq = cpu_rq(i);
 
-               /*
-                * If an SMT sibling task is sleeping due to priority
-                * reasons wake it up now.
-                */
-               if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
-                       resched_task(smt_rq->idle);
+               wakeup_busy_runqueue(smt_rq);
        }
 
        for_each_cpu_mask(i, sibling_map)
@@ -2625,6 +2688,16 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
         */
 }
 
+/*
+ * Number of 'lost' timeslices this task won't be able to fully
+ * utilize if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+{
+       return p->time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
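As a quick worked check of the arithmetic (the harness, and the per_cpu_gain value of 25 — the usual SMT default — are assumptions of this note, not part of the patch):

#include <assert.h>

/* Stand-alone model of smt_slice() above, for illustration only. */
static unsigned long smt_slice_model(unsigned long time_slice,
                                     unsigned int per_cpu_gain)
{
        return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
        /* With per_cpu_gain = 25, a task holding a 100-tick slice is
         * modelled as losing 75 of those ticks to a busy sibling. */
        assert(smt_slice_model(100, 25) == 75);
        return 0;
}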
 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
@@ -2668,6 +2741,10 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                runqueue_t *smt_rq = cpu_rq(i);
                task_t *smt_curr = smt_rq->curr;
 
+               /* Kernel threads do not participate in dependent sleeping */
+               if (!p->mm || !smt_curr->mm || rt_task(p))
+                       goto check_smt_task;
+
                /*
                 * If a user task with lower static priority than the
                 * running task on the SMT sibling is trying to schedule,
@@ -2676,21 +2753,45 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                 * task from using an unfair proportion of the
                 * physical cpu's resources. -ck
                 */
-               if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(p) || rt_task(smt_curr)) &&
-                       p->mm && smt_curr->mm && !rt_task(p))
-                               ret = 1;
+               if (rt_task(smt_curr)) {
+                       /*
+                        * With real-time tasks we run non-rt tasks only
+                        * per_cpu_gain% of the time.
+                        */
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       ret = 1;
+               } else
+                       if (smt_curr->static_prio < p->static_prio &&
+                               !TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(smt_curr, sd) > task_timeslice(p))
+                                       ret = 1;
+
+check_smt_task:
+               if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
+                       rt_task(smt_curr))
+                               continue;
+               if (!p->mm) {
+                       wakeup_busy_runqueue(smt_rq);
+                       continue;
+               }
 
                /*
-                * Reschedule a lower priority task on the SMT sibling,
-                * or wake it up if it has been put to sleep for priority
-                * reasons.
+                * Reschedule a lower priority task on the SMT sibling so
+                * that it is put to sleep, or wake it up if it has been put
+                * to sleep for priority reasons, to see if it should run now.
                 */
-               if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(smt_curr) || rt_task(p)) &&
-                       smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
-                       (smt_curr == smt_rq->idle && smt_rq->nr_running))
-                               resched_task(smt_curr);
+               if (rt_task(p)) {
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       resched_task(smt_curr);
+               } else {
+                       if (TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(p, sd) > task_timeslice(smt_curr))
+                                       resched_task(smt_curr);
+                       else
+                               wakeup_busy_runqueue(smt_rq);
+               }
        }
 out_unlock:
        for_each_cpu_mask(i, sibling_map)
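The two `jiffies % DEF_TIMESLICE` checks in this hunk implement a duty cycle: while an rt task owns the sibling, non-rt work is admitted only during the first per_cpu_gain percent of every DEF_TIMESLICE-long window of jiffies. A stand-alone model of the check (DEF_TIMESLICE's real definition lives in sched.c; the value 100 here is assumed):

#include <stdbool.h>

#define DEF_TIMESLICE 100       /* assumed stand-in for the sched.c value */

/* Returns true when a non-rt task must keep waiting because an rt
 * task is running on the SMT sibling. */
static bool non_rt_throttled(unsigned long jiffies_now,
                             unsigned int per_cpu_gain)
{
        return (jiffies_now % DEF_TIMESLICE) >
                (per_cpu_gain * DEF_TIMESLICE / 100);
}

With per_cpu_gain = 25 this admits non-rt work only while jiffies % 100 is at most 25, i.e. for a quarter of each window.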
@@ -3016,7 +3117,8 @@ need_resched:
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+                         void *key)
 {
        task_t *p = curr->private;
        return try_to_wake_up(p, mode, sync);
@@ -3058,7 +3160,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @key: is directly passed to the wakeup function
  */
 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
-                               int nr_exclusive, void *key)
+                       int nr_exclusive, void *key)
 {
        unsigned long flags;
 
@@ -3090,7 +3192,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall
+__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
        unsigned long flags;
        int sync = 1;
@@ -3281,7 +3384,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
        SLEEP_ON_VAR
 
@@ -3500,7 +3604,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
+int sched_setscheduler(struct task_struct *p, int policy,
+                      struct sched_param *param)
 {
        int retval;
        int oldprio, oldpolicy = -1;
@@ -3520,7 +3625,7 @@ recheck:
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
         */
        if (param->sched_priority < 0 ||
-           (p->mm &&  param->sched_priority > MAX_USER_RT_PRIO-1) ||
+           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
        if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
@@ -3583,7 +3688,8 @@ recheck:
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
-static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
        int retval;
        struct sched_param lparam;
@@ -3850,7 +3956,7 @@ asmlinkage long sys_sched_yield(void)
        if (rt_task(current))
                target = rq->active;
 
-       if (current->array->nr_active == 1) {
+       if (array->nr_active == 1) {
                schedstat_inc(rq, yld_act_empty);
                if (!rq->expired->nr_active)
                        schedstat_inc(rq, yld_both_empty);
@@ -3914,7 +4020,7 @@ EXPORT_SYMBOL(cond_resched);
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t * lock)
+int cond_resched_lock(spinlock_t *lock)
 {
        int ret = 0;
 
@@ -4097,7 +4203,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
        return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t * p)
+static void show_task(task_t *p)
 {
        task_t *relative;
        unsigned state;
@@ -4123,7 +4229,7 @@ static void show_task(task_t * p)
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        {
-               unsigned long * n = (unsigned long *) (p->thread_info+1);
+               unsigned long *n = (unsigned long *) (p->thread_info+1);
                while (!*n)
                        n++;
                free = (unsigned long) n - (unsigned long)(p->thread_info+1);
@@ -4332,7 +4438,7 @@ out:
  * thread migration by bumping thread off CPU then 'pushing' onto
  * another runqueue.
  */
-static int migration_thread(void * data)
+static int migration_thread(void *data)
 {
        runqueue_t *rq;
        int cpu = (long)data;
index 4980a07..b92c3c9 100644 (file)
@@ -2221,8 +2221,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
index 0c3f9d8..0375fcd 100644 (file)
@@ -3,7 +3,10 @@
  *
  * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
  *
- * Copyright (2004) Ingo Molnar
+ * Copyright (2004, 2005) Ingo Molnar
+ *
+ * This file contains the spinlock/rwlock implementations for the
+ * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
  */
 
 #include <linux/config.h>
  * Generic declaration of the raw read_trylock() function,
  * architectures are supposed to optimize this:
  */
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
+int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
 {
-       _raw_read_lock(lock);
+       __raw_read_lock(lock);
        return 1;
 }
-EXPORT_SYMBOL(generic_raw_read_trylock);
+EXPORT_SYMBOL(generic__raw_read_trylock);
 
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
@@ -57,7 +60,7 @@ int __lockfunc _write_trylock(rwlock_t *lock)
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
@@ -72,7 +75,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 
        local_irq_save(flags);
        preempt_disable();
-       _raw_spin_lock_flags(lock, flags);
+       _raw_spin_lock_flags(lock, &flags);
        return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
index 13e2b51..f4152fc 100644 (file)
@@ -1154,6 +1154,20 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 
 EXPORT_SYMBOL(schedule_timeout);
 
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_interruptible);
+
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_uninterruptible);
+
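The two wrappers fold a recurring two-line pattern into a single call; most of the remaining hunks in this patch are mechanical conversions of exactly this shape:

/* before: open-coded at each call site */
set_current_state(TASK_INTERRUPTIBLE);
timeout = schedule_timeout(timeout);

/* after: same behaviour, one call */
timeout = schedule_timeout_interruptible(timeout);

A side effect worth noting: call sites that previously assigned current->state directly (as in the sys_rt_sigtimedwait hunk above) now go through set_current_state() and so pick up its memory barrier.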
 /* Thread ID - the internal kernel "pid" */
 asmlinkage long sys_gettid(void)
 {
@@ -1170,8 +1184,7 @@ static long __sched nanosleep_restart(struct restart_block *restart)
        if (!time_after(expire, now))
                return 0;
 
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
+       expire = schedule_timeout_interruptible(expire - now);
 
        ret = 0;
        if (expire) {
@@ -1199,8 +1212,7 @@ asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __us
                return -EINVAL;
 
        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
+       expire = schedule_timeout_interruptible(expire);
 
        ret = 0;
        if (expire) {
@@ -1598,10 +1610,8 @@ void msleep(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout)
+               timeout = schedule_timeout_uninterruptible(timeout);
 }
 
 EXPORT_SYMBOL(msleep);
@@ -1614,10 +1624,8 @@ unsigned long msleep_interruptible(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout && !signal_pending(current)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout && !signal_pending(current))
+               timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
 }
 
index d9c38ba..44a4675 100644 (file)
@@ -16,6 +16,7 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
index 6658d81..2377af0 100644 (file)
@@ -25,8 +25,6 @@
  * this is trivially done efficiently using a load-locked
  * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        spin_lock(lock);
@@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
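For context, _atomic_dec_and_lock() backs atomic_dec_and_lock(), the helper for refcounted objects that live on a locked list: the lock is taken only on the final put. A generic sketch of that pattern (the struct and all names are hypothetical, not from this patch):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

/* Hypothetical refcounted object kept on a spinlock-protected list. */
struct example_obj {
        atomic_t refcount;
        struct list_head node;
};

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example_put(struct example_obj *obj)
{
        /* Nonzero return means the count hit zero and the lock is
         * now held; otherwise the lock was never taken. */
        if (!atomic_dec_and_lock(&obj->refcount, &example_lock))
                return;
        list_del(&obj->node);
        spin_unlock(&example_lock);
        kfree(obj);
}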
index bd2bc5d..cb5490e 100644 (file)
@@ -177,8 +177,7 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-       _raw_spin_unlock(&kernel_flag);
-       preempt_enable();
+       spin_unlock(&kernel_flag);
 }
 
 /*
index b972dd2..6a8bc6e 100644 (file)
@@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * success, return zero, with preemption disabled.  On error, return -ENOMEM
  * with preemption not disabled.
  */
-int radix_tree_preload(int gfp_mask)
+int radix_tree_preload(unsigned int __nocast gfp_mask)
 {
        struct radix_tree_preload *rtp;
        struct radix_tree_node *node;
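The contract described in the comment above (success returns zero with preemption disabled) is what lets callers insert under a spinlock without allocation failures. The canonical shape, sketched after the add_to_page_cache() pattern (the tree, lock, index and item are assumed to be declared elsewhere):

/* Preallocate nodes outside the lock, then insert atomically.
 * radix_tree_preload_end() simply re-enables preemption. */
error = radix_tree_preload(GFP_KERNEL);
if (!error) {
        spin_lock_irq(&example_tree_lock);
        error = radix_tree_insert(&example_tree, index, item);
        spin_unlock_irq(&example_tree_lock);
        radix_tree_preload_end();
}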
index b73dbb0..ddc4d35 100644 (file)
@@ -6,15 +6,16 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
-void u32_swap(void *a, void *b, int size)
+static void u32_swap(void *a, void *b, int size)
 {
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
 }
 
-void generic_swap(void *a, void *b, int size)
+static void generic_swap(void *a, void *b, int size)
 {
        char t;
 
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
new file mode 100644 (file)
index 0000000..906ad10
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the spinlock/rwlock implementations for
+ * DEBUG_SPINLOCK.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+static void spin_bug(spinlock_t *lock, const char *msg)
+{
+       static long print_once = 1;
+       struct task_struct *owner = NULL;
+
+       if (xchg(&print_once, 0)) {
+               if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+                       owner = lock->owner;
+               printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+                       msg, smp_processor_id(), current->comm, current->pid);
+               printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+                       lock, lock->magic,
+                       owner ? owner->comm : "<none>",
+                       owner ? owner->pid : -1,
+                       lock->owner_cpu);
+               dump_stack();
+#ifdef CONFIG_SMP
+               /*
+                * We cannot continue on SMP:
+                */
+//             panic("bad locking");
+#endif
+       }
+}
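The xchg(&print_once, 0) guard is a small lock-free report-once idiom: the first CPU to swap the 1 out wins the right to print, which matters in a path where the locks themselves are suspect. The same idiom in portable C11, as a stand-alone illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long print_once = 1;

/* The first caller wins the exchange and reports; later and
 * concurrent callers read back 0 and stay quiet. */
static void report_once(const char *msg)
{
        if (atomic_exchange(&print_once, 0))
                fprintf(stderr, "BUG: %s\n", msg);
}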
+
+#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+
+static inline void debug_spin_lock_before(spinlock_t *lock)
+{
+       SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+       SPIN_BUG_ON(lock->owner == current, lock, "recursion");
+       SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+                                                       lock, "cpu recursion");
+}
+
+static inline void debug_spin_lock_after(spinlock_t *lock)
+{
+       lock->owner_cpu = raw_smp_processor_id();
+       lock->owner = current;
+}
+
+static inline void debug_spin_unlock(spinlock_t *lock)
+{
+       SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+       SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+       SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
+       SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+                                                       lock, "wrong CPU");
+       lock->owner = SPINLOCK_OWNER_INIT;
+       lock->owner_cpu = -1;
+}
+
+static void __spin_lock_debug(spinlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_spin_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+       debug_spin_lock_before(lock);
+       if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+               __spin_lock_debug(lock);
+       debug_spin_lock_after(lock);
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+       int ret = __raw_spin_trylock(&lock->raw_lock);
+
+       if (ret)
+               debug_spin_lock_after(lock);
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_spin_unlock(spinlock_t *lock)
+{
+       debug_spin_unlock(lock);
+       __raw_spin_unlock(&lock->raw_lock);
+}
+
+static void rwlock_bug(rwlock_t *lock, const char *msg)
+{
+       static long print_once = 1;
+
+       if (xchg(&print_once, 0)) {
+               printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
+                       smp_processor_id(), current->comm, current->pid, lock);
+               dump_stack();
+#ifdef CONFIG_SMP
+               /*
+                * We cannot continue on SMP:
+                */
+               panic("bad locking");
+#endif
+       }
+}
+
+#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+
+static void __read_lock_debug(rwlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_read_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_read_lock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
+               __read_lock_debug(lock);
+}
+
+int _raw_read_trylock(rwlock_t *lock)
+{
+       int ret = __raw_read_trylock(&lock->raw_lock);
+
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_read_unlock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       __raw_read_unlock(&lock->raw_lock);
+}
+
+static inline void debug_write_lock_before(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
+       RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+                                                       lock, "cpu recursion");
+}
+
+static inline void debug_write_lock_after(rwlock_t *lock)
+{
+       lock->owner_cpu = raw_smp_processor_id();
+       lock->owner = current;
+}
+
+static inline void debug_write_unlock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
+       RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+                                                       lock, "wrong CPU");
+       lock->owner = SPINLOCK_OWNER_INIT;
+       lock->owner_cpu = -1;
+}
+
+static void __write_lock_debug(rwlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_write_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_write_lock(rwlock_t *lock)
+{
+       debug_write_lock_before(lock);
+       if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
+               __write_lock_debug(lock);
+       debug_write_lock_after(lock);
+}
+
+int _raw_write_trylock(rwlock_t *lock)
+{
+       int ret = __raw_write_trylock(&lock->raw_lock);
+
+       if (ret)
+               debug_write_lock_after(lock);
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_write_unlock(rwlock_t *lock)
+{
+       debug_write_unlock(lock);
+       __raw_write_unlock(&lock->raw_lock);
+}
index 8861192..b534657 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/mman.h>
 
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+       loff_t offset, unsigned long nr_segs);
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -301,8 +305,9 @@ EXPORT_SYMBOL(sync_page_range);
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, size_t count)
+static int sync_page_range_nolock(struct inode *inode,
+                                 struct address_space *mapping,
+                                 loff_t pos, size_t count)
 {
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -317,7 +322,6 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
 
 /**
  * filemap_fdatawait - walk the list of under-writeback pages of the given
@@ -2008,7 +2012,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-ssize_t
+static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2108,7 +2112,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
-ssize_t
+static ssize_t
 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2229,7 +2233,7 @@ EXPORT_SYMBOL(generic_file_writev);
  * Called under i_sem for writes to S_ISREG files.   Returns -EIO if something
  * went wrong during pagecache shootdown.
  */
-ssize_t
+static ssize_t
 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t offset, unsigned long nr_segs)
 {
@@ -2264,4 +2268,3 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
        return retval;
 }
-EXPORT_SYMBOL_GPL(generic_file_direct_IO);
index 788a628..ae8161f 100644 (file)
@@ -2225,7 +2225,7 @@ void update_mem_hiwater(struct task_struct *tsk)
 #if !defined(__HAVE_ARCH_GATE_AREA)
 
 #if defined(AT_SYSINFO_EHDR)
-struct vm_area_struct gate_vma;
+static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
index 5ec8da1..ac3bf33 100644 (file)
@@ -300,6 +300,5 @@ retry:
         * Give "p" a good chance of killing itself before we
         * retry to allocate memory.
         */
-       __set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(1);
+       schedule_timeout_interruptible(1);
 }
index 3974fd8..c5823c3 100644 (file)
@@ -335,7 +335,7 @@ static inline void free_pages_check(const char *function, struct page *page)
 /*
  * Frees a list of pages. 
  * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free, or 0 for all on the list.
+ * count is the number of pages to free.
  *
  * If the zone was previously in an "all pages pinned" state then look to
  * see if this freeing clears that state.
index 05a3910..9e876d6 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1720,7 +1720,7 @@ next:
        cachep->objsize = size;
 
        if (flags & CFLGS_OFF_SLAB)
-               cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
+               cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
        cachep->ctor = ctor;
        cachep->dtor = dtor;
        cachep->name = name;
@@ -2839,7 +2839,7 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
 {
        unsigned long save_flags;
        void *ptr;
index 029e56e..adbc2b4 100644 (file)
@@ -67,8 +67,8 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page,
-               swp_entry_t entry, int gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
+                              unsigned int __nocast gfp_mask)
 {
        int error;
 
index 4b6e8bf..0184f51 100644 (file)
@@ -1153,8 +1153,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        p->highest_bit = 0;             /* cuts scans short */
        while (p->flags >= SWP_SCANNING) {
                spin_unlock(&swap_lock);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
                spin_lock(&swap_lock);
        }
 
index 9087273..db3c708 100644 (file)
@@ -49,6 +49,9 @@ build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
 cmd = @$(if $($(quiet)cmd_$(1)),\
       echo '  $(subst ','\'',$($(quiet)cmd_$(1)))' &&) $(cmd_$(1))
 
+# Add $(obj)/ for paths that are not absolute
+objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
+
 ###
 # if_changed      - execute command if any prerequisite is newer than 
 #                   target, or command line has changed
index f04f627..c2d5414 100644 (file)
@@ -91,12 +91,7 @@ foreach $object (keys(%object)) {
                     $from !~ /\.exit\.data$/ &&
                     $from !~ /\.altinstructions$/ &&
                     $from !~ /\.pdr$/ &&
-                    $from !~ /\.debug_info$/ &&
-                    $from !~ /\.debug_aranges$/ &&
-                    $from !~ /\.debug_ranges$/ &&
-                    $from !~ /\.debug_line$/ &&
-                    $from !~ /\.debug_frame$/ &&
-                    $from !~ /\.debug_loc$/ &&
+                    $from !~ /\.debug_.*$/ &&
                     $from !~ /\.exitcall\.exit$/ &&
                     $from !~ /\.eh_frame$/ &&
                     $from !~ /\.stab$/)) {
index b62920e..d64790b 100644 (file)
@@ -42,8 +42,6 @@ MODULE_LICENSE("GPL");
 #else
 #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24))
 #endif
-#define LE_SHORT(v)            le16_to_cpu(v)
-#define LE_INT(v)              le32_to_cpu(v)
 
 #define RIFF_HEADER    CSP_HDR_VALUE('R', 'I', 'F', 'F')
 #define CSP__HEADER    CSP_HDR_VALUE('C', 'S', 'P', ' ')
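With the LE_SHORT()/LE_INT() wrappers gone, the driver calls the kernel's le16_to_cpu()/le32_to_cpu() directly, which also documents the on-disk byte order at each use. The general shape of the pattern, sketched with a hypothetical header (the struct, limit and function are illustrations, not this driver's code):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#define EXAMPLE_MAX_LEN 0x10000         /* hypothetical limit */

/* A little-endian on-disk header: fields must be converted on every
 * load; on little-endian hosts the conversions compile away. */
struct example_le_header {
        __u32 magic;
        __u32 len;
};

static int example_check(const struct example_le_header *h)
{
        if (le32_to_cpu(h->len) > EXAMPLE_MAX_LEN)
                return -EINVAL;
        return 0;
}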
@@ -56,20 +54,20 @@ MODULE_LICENSE("GPL");
 /*
  * RIFF data format
  */
-typedef struct riff_header {
+struct riff_header {
        __u32 name;
        __u32 len;
-} riff_header_t;
+};
 
-typedef struct desc_header {
-       riff_header_t info;
+struct desc_header {
+       struct riff_header info;
        __u16 func_nr;
        __u16 VOC_type;
        __u16 flags_play_rec;
        __u16 flags_16bit_8bit;
        __u16 flags_stereo_mono;
        __u16 flags_rates;
-} desc_header_t;
+};
 
 /*
  * prototypes
@@ -302,9 +300,9 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        unsigned char __user *data_end;
        unsigned short func_nr = 0;
 
-       riff_header_t file_h, item_h, code_h;
+       struct riff_header file_h, item_h, code_h;
        __u32 item_type;
-       desc_header_t funcdesc_h;
+       struct desc_header funcdesc_h;
 
        unsigned long flags;
        int err;
@@ -316,12 +314,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        if (copy_from_user(&file_h, data_ptr, sizeof(file_h)))
                return -EFAULT;
        if ((file_h.name != RIFF_HEADER) ||
-           (LE_INT(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
+           (le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
                snd_printd("%s: Invalid RIFF header\n", __FUNCTION__);
                return -EINVAL;
        }
        data_ptr += sizeof(file_h);
-       data_end = data_ptr + LE_INT(file_h.len);
+       data_end = data_ptr + le32_to_cpu(file_h.len);
 
        if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
                return -EFAULT;
@@ -331,7 +329,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        }
        data_ptr += sizeof (item_type);
 
-       for (; data_ptr < data_end; data_ptr += LE_INT(item_h.len)) {
+       for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) {
                if (copy_from_user(&item_h, data_ptr, sizeof(item_h)))
                        return -EFAULT;
                data_ptr += sizeof(item_h);
@@ -344,7 +342,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                case FUNC_HEADER:
                        if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h)))
                                return -EFAULT;
-                       func_nr = LE_SHORT(funcdesc_h.func_nr);
+                       func_nr = le16_to_cpu(funcdesc_h.func_nr);
                        break;
                case CODE_HEADER:
                        if (func_nr != info.func_req)
@@ -370,11 +368,11 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                                if (code_h.name != INIT_HEADER)
                                        break;
                                data_ptr += sizeof(code_h);
-                               err = snd_sb_csp_load_user(p, data_ptr, LE_INT(code_h.len),
+                               err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
                                                      SNDRV_SB_CSP_LOAD_INITBLOCK);
                                if (err)
                                        return err;
-                               data_ptr += LE_INT(code_h.len);
+                               data_ptr += le32_to_cpu(code_h.len);
                        }
                        /* main microcode block */
                        if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
@@ -386,17 +384,17 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                        }
                        data_ptr += sizeof(code_h);
                        err = snd_sb_csp_load_user(p, data_ptr,
-                                                  LE_INT(code_h.len), 0);
+                                                  le32_to_cpu(code_h.len), 0);
                        if (err)
                                return err;
 
                        /* fill in codec header */
                        strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name));
                        p->func_nr = func_nr;
-                       p->mode = LE_SHORT(funcdesc_h.flags_play_rec);
-                       switch (LE_SHORT(funcdesc_h.VOC_type)) {
+                       p->mode = le16_to_cpu(funcdesc_h.flags_play_rec);
+                       switch (le16_to_cpu(funcdesc_h.VOC_type)) {
                        case 0x0001:    /* QSound decoder */
-                               if (LE_SHORT(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
+                               if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
                                        if (snd_sb_qsound_build(p) == 0)
                                                /* set QSound flag and clear all other mode flags */
                                                p->mode = SNDRV_SB_CSP_MODE_QSOUND;
@@ -426,12 +424,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                                p->mode = 0;
                                snd_printd("%s: Unsupported CSP codec type: 0x%04x\n",
                                           __FUNCTION__,
-                                          LE_SHORT(funcdesc_h.VOC_type));
+                                          le16_to_cpu(funcdesc_h.VOC_type));
                                return -EINVAL;
                        }
-                       p->acc_channels = LE_SHORT(funcdesc_h.flags_stereo_mono);
-                       p->acc_width = LE_SHORT(funcdesc_h.flags_16bit_8bit);
-                       p->acc_rates = LE_SHORT(funcdesc_h.flags_rates);
+                       p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono);
+                       p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit);
+                       p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates);
 
                        /* Decouple CSP from IRQ and DMAREQ lines */
                        spin_lock_irqsave(&p->chip->reg_lock, flags);
diff --git a/sound/oss/skeleton.c b/sound/oss/skeleton.c
deleted file mode 100644 (file)
index 8fea783..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- *     PCI sound skeleton example
- *
- *     (c) 1998 Red Hat Software
- *
- *     This software may be used and distributed according to the 
- *     terms of the GNU General Public License, incorporated herein by 
- *     reference.
- *
- *     This example is designed to be built in the linux/drivers/sound
- *     directory as part of a kernel build. The example is modular only
- *     drop me a note once you have a working modular driver and want
- *     to integrate it with the main code.
- *             -- Alan <alan@redhat.com>
- *
- *     This is a first draft. Please report any errors, corrections or
- *     improvements to me.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-#include "sound_config.h"
-
-/*
- *     Define our PCI vendor ID here
- */
-#ifndef PCI_VENDOR_MYIDENT
-#define PCI_VENDOR_MYIDENT                     0x125D
-
-/*
- *     PCI identity for the card.
- */
-#define PCI_DEVICE_ID_MYIDENT_MYCARD1          0x1969
-#endif
-
-#define CARD_NAME      "ExampleWave 3D Pro Ultra ThingyWotsit"
-
-#define MAX_CARDS      8
-
-/*
- *     Each address_info object holds the information about one of
- *     our card resources. In this case the MSS emulation of our
- *     ficticious card. Its used to manage and attach things.
- */
-static struct address_info     mss_data[MAX_CARDS];
-static int                     cards;
-
-/*
- *     Install the actual card. This is an example
- */
-
-static int mycard_install(struct pci_dev *pcidev)
-{
-       int iobase;
-       int mssbase;
-       int mpubase;
-       u8 x;
-       u16 w;
-       u32 v;
-       int i;
-       int dma;
-
-       /*
-        *      Our imaginary code has its I/O on PCI address 0, a
-        *      MSS on PCI address 1 and an MPU on address 2
-        *
-        *      For the example we will only initialise the MSS
-        */
-               
-       iobase = pci_resource_start(pcidev, 0);
-       mssbase = pci_resource_start(pcidev, 1);
-       mpubase = pci_resource_start(pcidev, 2);
-       
-       /*
-        *      Reset the board
-        */
-        
-       /*
-        *      Wait for completion. udelay() waits in microseconds
-        */
-        
-       udelay(100);
-       
-       /*
-        *      Ok card ready. Begin setup proper. You might for example
-        *      load the firmware here
-        */
-       
-       dma = card_specific_magic(ioaddr);
-       
-       /*
-        *      Turn on legacy mode (example), There are also byte and
-        *      dword (32bit) PCI configuration function calls
-        */
-
-       pci_read_config_word(pcidev, 0x40, &w);
-       w&=~(1<<15);                    /* legacy decode on */
-       w|=(1<<14);                     /* Reserved write as 1 in this case */
-       w|=(1<<3)|(1<<1)|(1<<0);        /* SB on , FM on, MPU on */
-       pci_write_config_word(pcidev, 0x40, w);
-       
-       /*
-        *      Let the user know we found his toy.
-        */
-        
-       printk(KERN_INFO "Programmed "CARD_NAME" at 0x%X to legacy mode.\n",
-               iobase);
-               
-       /*
-        *      Now set it up the description of the card
-        */
-        
-       mss_data[cards].io_base = mssbase;
-       mss_data[cards].irq = pcidev->irq;
-       mss_data[cards].dma = dma;
-       
-       /*
-        *      Check there is an MSS present
-        */
-
-       if(ad1848_detect(mssbase, NULL, mss_data[cards].osp)==0)
-               return 0;
-               
-       /*
-        *      Initialize it
-        */
-        
-       mss_data[cards].slots[3] = ad1848_init("MyCard MSS 16bit", 
-                       mssbase,
-                       mss_data[cards].irq,
-                       mss_data[cards].dma,
-                       mss_data[cards].dma,
-                       0,
-                       0,
-                       THIS_MODULE);
-
-       cards++;        
-       return 1;
-}
-
-
-/*
- *     This loop walks the PCI configuration database and finds where
- *     the sound cards are.
- */
-int init_mycard(void)
-{
-       struct pci_dev *pcidev=NULL;
-       int count=0;
-               
-       while((pcidev = pci_find_device(PCI_VENDOR_MYIDENT, PCI_DEVICE_ID_MYIDENT_MYCARD1, pcidev))!=NULL)
-       {
-               if (pci_enable_device(pcidev))
-                       continue;
-               count+=mycard_install(pcidev);
-               if(count)
-                       return 0;
-               if(count==MAX_CARDS)
-                       break;
-       }
-       
-       if(count==0)
-               return -ENODEV;
-       return 0;
-}
-
-/*
- *     This function is called when the user or kernel loads the 
- *     module into memory.
- */
-
-
-int init_module(void)
-{
-       if(init_mycard()<0)
-       {
-               printk(KERN_ERR "No "CARD_NAME" cards found.\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/*
- *     This is called when it is removed. It will only be removed 
- *     when its use count is 0.
- */
-void cleanup_module(void)
-{
-       for(i=0;i< cards; i++)
-       {
-               /*
-                *      Free attached resources
-                */
-                
-               ad1848_unload(mss_data[i].io_base,
-                             mss_data[i].irq,
-                             mss_data[i].dma,
-                             mss_data[i].dma,
-                             0);
-               /*
-                *      And disconnect the device from the kernel
-                */
-               sound_unload_audiodevice(mss_data[i].slots[3]);
-       }
-}
-