Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Apr 2010 17:40:36 +0000 (10:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Apr 2010 17:40:36 +0000 (10:40 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6:
  ALSA: hda: Use STAC_DELL_M6_BOTH quirk for Dell Studio 1558
  ALSA: hda: Use LPIB quirk for DG965OT board version AAD63733-203
  ALSA: snd-maestro3: Ignore spurious HV interrupts during suspend / resume
  ALSA: snd-maestro3: Add amp_gpio quirk for Compaq EVO N600C
  ALSA: hda: Use ALC880_F1734 quirk for Fujitsu Siemens AMILO Xi 1526
  ALSA: hda: Use STAC_DELL_M6_BOTH quirk for Dell Studio XPS 1645
  ALSA: hda - Fix resume from StR of HP 2510p with docking-station

300 files changed:
Documentation/HOWTO
Documentation/RCU/NMI-RCU.txt
Documentation/RCU/checklist.txt
Documentation/RCU/lockdep.txt
Documentation/RCU/whatisRCU.txt
Documentation/input/multi-touch-protocol.txt
Documentation/kernel-parameters.txt
Documentation/networking/timestamping.txt
Documentation/stable_kernel_rules.txt
MAINTAINERS
Makefile
arch/arm/include/asm/highmem.h
arch/arm/include/asm/kmap_types.h
arch/arm/include/asm/ucontext.h
arch/arm/include/asm/user.h
arch/arm/kernel/signal.c
arch/arm/mach-at91/Makefile
arch/arm/mach-at91/pm_slowclock.S
arch/arm/mm/copypage-v6.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/flush.c
arch/arm/mm/highmem.c
arch/arm/mm/mmu.c
arch/arm/vfp/vfpmodule.c
arch/ia64/kvm/kvm-ia64.c
arch/m68k/include/asm/atomic_mm.h
arch/m68k/include/asm/mcfuart.h
arch/m68k/include/asm/sigcontext.h
arch/m68knommu/Makefile
arch/m68knommu/kernel/entry.S
arch/m68knommu/platform/68360/ints.c
arch/powerpc/kvm/book3s.c
arch/s390/include/asm/vdso.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/swsusp_asm64.S
arch/s390/kernel/time.c
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/sparc/Kconfig
arch/sparc/Kconfig.debug
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/ftrace.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/kstack.h
arch/sparc/kernel/nmi.c
arch/sparc/kernel/pci_common.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/time_64.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/lib/mcount.S
arch/um/drivers/line.c
arch/um/os-Linux/helper.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/lguest_hcall.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/crash.c
arch/x86/kernel/dumpstack.h
arch/x86/kernel/pci-gart_64.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lguest/i386_head.S
drivers/acpi/acpica/exprep.c
drivers/ata/libata-eh.c
drivers/ata/pata_pcmcia.c
drivers/char/agp/intel-agp.c
drivers/char/pcmcia/cm4000_cs.c
drivers/firewire/core-cdev.c
drivers/firewire/core-iso.c
drivers/firewire/ohci.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_opregion.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r100_track.h
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/reg_srcs/r300
drivers/gpu/drm/radeon/reg_srcs/r420
drivers/gpu/drm/radeon/reg_srcs/rs600
drivers/gpu/drm/radeon/reg_srcs/rv515
drivers/gpu/drm/radeon/rs600.c
drivers/hwmon/applesmc.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/it87.c
drivers/hwmon/sht15.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-stu300.c
drivers/ide/ide-cs.c
drivers/input/input.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/sparse-keymap.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/gigaset/capi.c
drivers/isdn/gigaset/common.c
drivers/isdn/gigaset/gigaset.h
drivers/isdn/gigaset/i4l.c
drivers/isdn/gigaset/interface.c
drivers/isdn/gigaset/proc.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/isdn/gigaset/usb-gigaset.c
drivers/lguest/lguest_device.c
drivers/lguest/x86/core.c
drivers/md/raid5.c
drivers/net/cnic.c
drivers/net/e1000e/netdev.c
drivers/net/forcedeth.c
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/myri10ge/myri10ge.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/qlcnic/qlcnic_hw.c
drivers/net/r6040.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tun.c
drivers/net/virtio_net.c
drivers/net/wan/hdlc_ppp.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-calib.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/pcmcia/cistpl.c
drivers/pcmcia/db1xxx_ss.c
drivers/pcmcia/ds.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/rsrc_nonstatic.c
drivers/regulator/mc13783-regulator.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_3990_erp.c
drivers/s390/char/zcore.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/css.c
drivers/s390/cio/device_fsm.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/dpt_i2o.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/wd7000.c
drivers/serial/mcf.c
drivers/serial/serial_cs.c
drivers/staging/dt3155/dt3155_drv.c
drivers/usb/core/driver.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mem.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci.h
drivers/usb/host/ohci-da8xx.c
drivers/usb/misc/usbsevseg.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcaux.c
drivers/usb/serial/sierra.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
drivers/usb/wusbcore/devconnect.c
drivers/vhost/vhost.c
drivers/virtio/virtio_balloon.c
drivers/watchdog/Kconfig
drivers/watchdog/booke_wdt.c
drivers/watchdog/max63xx_wdt.c
fs/afs/mntpt.c
fs/binfmt_flat.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/messenger.c
fs/ceph/osdmap.c
fs/ceph/osdmap.h
fs/ceph/rados.h
fs/ceph/snap.c
fs/ceph/super.h
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/inode.c
fs/ecryptfs/mmap.c
fs/ecryptfs/super.c
fs/ioctl.c
fs/jfs/inode.c
fs/jfs/jfs_dmap.c
fs/jfs/jfs_dmap.h
fs/jfs/jfs_inode.h
fs/jfs/namei.c
fs/jfs/resize.c
fs/jfs/symlink.c
fs/logfs/gc.c
fs/logfs/journal.c
fs/logfs/logfs.h
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/logfs/super.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/write.c
fs/quota/Kconfig
fs/quota/dquot.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_log.c
include/drm/drm_pciids.h
include/linux/firewire-cdev.h
include/linux/firewire-constants.h
include/linux/fs.h
include/linux/input/matrix_keypad.h
include/linux/kvm_host.h
include/linux/nfs_fs_sb.h
include/linux/rcupdate.h
include/linux/regulator/consumer.h
include/net/x25.h
kernel/cred.c
kernel/power/user.c
kernel/rcupdate.c
lib/Kconfig.debug
lib/dma-debug.c
lib/vsprintf.c
mm/rmap.c
net/bridge/br_multicast.c
net/can/raw.c
net/core/dev.c
net/ipv4/fib_trie.c
net/ipv4/ip_output.c
net/ipv4/udp.c
net/ipv6/ip6_output.c
net/ipv6/udp.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/packet/af_packet.c
net/x25/af_x25.c
net/x25/x25_facilities.c
net/x25/x25_in.c
security/inode.c
security/selinux/ss/avtab.h
virt/kvm/kvm_main.c

index f5395af..40ada93 100644 (file)
@@ -234,7 +234,7 @@ process is as follows:
     Linus, usually the patches that have already been included in the
     -next kernel for a few weeks.  The preferred way to submit big changes
     is using git (the kernel's source management tool, more information
-    can be found at http://git.or.cz/) but plain patches are also just
+    can be found at http://git-scm.com/) but plain patches are also just
     fine.
   - After two weeks a -rc1 kernel is released it is now possible to push
     only patches that do not include new features that could affect the
index a6d32e6..a8536cb 100644 (file)
@@ -34,7 +34,7 @@ NMI handler.
                cpu = smp_processor_id();
                ++nmi_count(cpu);
 
-               if (!rcu_dereference(nmi_callback)(regs, cpu))
+               if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
                        default_do_nmi(regs);
 
                nmi_exit();
@@ -47,12 +47,13 @@ function pointer.  If this handler returns zero, do_nmi() invokes the
 default_do_nmi() function to handle a machine-specific NMI.  Finally,
 preemption is restored.
 
-Strictly speaking, rcu_dereference() is not needed, since this code runs
-only on i386, which does not need rcu_dereference() anyway.  However,
-it is a good documentation aid, particularly for anyone attempting to
-do something similar on Alpha.
+In theory, rcu_dereference_sched() is not needed, since this code runs
+only on i386, which in theory does not need rcu_dereference_sched()
+anyway.  However, in practice it is a good documentation aid, particularly
+for anyone attempting to do something similar on Alpha or on systems
+with aggressive optimizing compilers.
 
-Quick Quiz:  Why might the rcu_dereference() be necessary on Alpha,
+Quick Quiz:  Why might the rcu_dereference_sched() be necessary on Alpha,
             given that the code referenced by the pointer is read-only?
 
 
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
 
 Answer to Quick Quiz
 
-       Why might the rcu_dereference() be necessary on Alpha, given
+       Why might the rcu_dereference_sched() be necessary on Alpha, given
        that the code referenced by the pointer is read-only?
 
        Answer: The caller to set_nmi_callback() might well have
-               initialized some data that is to be used by the
-               new NMI handler.  In this case, the rcu_dereference()
-               would be needed, because otherwise a CPU that received
-               an NMI just after the new handler was set might see
-               the pointer to the new NMI handler, but the old
-               pre-initialized version of the handler's data.
-
-               More important, the rcu_dereference() makes it clear
-               to someone reading the code that the pointer is being
-               protected by RCU.
+               initialized some data that is to be used by the new NMI
+               handler.  In this case, the rcu_dereference_sched() would
+               be needed, because otherwise a CPU that received an NMI
+               just after the new handler was set might see the pointer
+               to the new NMI handler, but the old pre-initialized
+               version of the handler's data.
+
+               This same sad story can happen on other CPUs when using
+               a compiler with aggressive pointer-value speculation
+               optimizations.
+
+               More important, the rcu_dereference_sched() makes it
+               clear to someone reading the code that the pointer is
+               being protected by RCU-sched.
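
For illustration, here is a minimal sketch of the updater side implied by
the quiz answer (the nmi_data structure and install_handler() are
hypothetical, not part of the patch): all initialization must complete
before rcu_assign_pointer() publishes the pointer.

	struct nmi_data {
		int threshold;
	};
	static struct nmi_data *nmi_data;

	void install_handler(struct nmi_data *newd)
	{
		newd->threshold = 10;                /* initialize first ... */
		rcu_assign_pointer(nmi_data, newd);  /* ... then publish */
	}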
index cbc180f..790d1a8 100644 (file)
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
        The reason that it is permissible to use RCU list-traversal
        primitives when the update-side lock is held is that doing so
        can be quite helpful in reducing code bloat when common code is
-       shared between readers and updaters.
+       shared between readers and updaters.  Additional primitives
+       are provided for this case, as discussed in lockdep.txt.
 
 10.    Conversely, if you are in an RCU read-side critical section,
        and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
        requiring SRCU's read-side deadlock immunity or low read-side
        realtime latency.
 
-       Note that, rcu_assign_pointer() and rcu_dereference() relate to
-       SRCU just as they do to other forms of RCU.
+       Note that rcu_assign_pointer() relates to SRCU just as it does
+       to other forms of RCU.
 
 15.    The whole point of call_rcu(), synchronize_rcu(), and friends
        is to wait until all pre-existing readers have finished before
index fe24b58..d7a49b2 100644 (file)
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
        srcu_dereference(p, sp):
                Check for SRCU read-side critical section.
        rcu_dereference_check(p, c):
-               Use explicit check expression "c".
+               Use explicit check expression "c".  This is useful in
+               code that is invoked by both readers and updaters.
        rcu_dereference_raw(p)
                Don't check.  (Use sparingly, if at all.)
+       rcu_dereference_protected(p, c):
+               Use explicit check expression "c", and omit all barriers
+               and compiler constraints.  This is useful when the data
+               structure cannot change, for example, in code that is
+               invoked only by updaters.
+       rcu_access_pointer(p):
+               Return the value of the pointer and omit all barriers,
+               but retain the compiler constraints that prevent duplicating
+               or coalescing.  This is useful when testing the
+               value of the pointer itself, for example, against NULL.
 
 The rcu_dereference_check() check expression can be any boolean
 expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
 RCU read-side critical sections, in case (2) the ->file_lock prevents
 any change from taking place, and finally, in case (3) the current task
 is the only task accessing the file_struct, again preventing any change
-from taking place.
+from taking place.  If the above statement were invoked only from updater
+code, it could instead be written as follows:
+
+       file = rcu_dereference_protected(fdt->fd[fd],
+                                        lockdep_is_held(&files->file_lock) ||
+                                        atomic_read(&files->count) == 1);
+
+This would verify cases #2 and #3 above, and furthermore lockdep would
+complain if this was used in an RCU read-side critical section unless one
+of these two cases held.  Because rcu_dereference_protected() omits all
+barriers and compiler constraints, it generates better code than do the
+other flavors of rcu_dereference().  On the other hand, it is illegal
+to use rcu_dereference_protected() if either the RCU-protected pointer
+or the RCU-protected data that it points to can change concurrently.
 
 There are currently only "universal" versions of the rcu_assign_pointer()
 and RCU list-/tree-traversal primitives, which do not (yet) check for
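
For illustration, a minimal sketch of the rcu_access_pointer() case
described above, reusing the document's fdt->fd[fd] example (the
surrounding check is hypothetical): only the pointer value itself is
tested, so no barriers are required.

	/* Updater holding ->file_lock: test the pointer without dereferencing. */
	if (rcu_access_pointer(fdt->fd[fd]) == NULL)
		return -EBADF;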
index 1dc00ee..cfaac34 100644 (file)
@@ -840,6 +840,12 @@ SRCU:      Initialization/cleanup
        init_srcu_struct
        cleanup_srcu_struct
 
+All:  lockdep-checked RCU-protected pointer access
+
+       rcu_dereference_check
+       rcu_dereference_protected
+       rcu_access_pointer
+
 See the comment headers in the source code (or the docbook generated
 from them) for more information.
 
index 8490480..c0fc1c7 100644 (file)
@@ -68,6 +68,22 @@ like:
    SYN_MT_REPORT
    SYN_REPORT
 
+Here is the sequence after lifting one of the fingers:
+
+   ABS_MT_POSITION_X
+   ABS_MT_POSITION_Y
+   SYN_MT_REPORT
+   SYN_REPORT
+
+And here is the sequence after lifting the remaining finger:
+
+   SYN_MT_REPORT
+   SYN_REPORT
+
+If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
+ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
+last SYN_REPORT will be dropped by the input core, resulting in no
+zero-finger event reaching userland.
 
 Event Semantics
 ---------------
@@ -217,11 +233,6 @@ where examples can be found.
 difference between the contact position and the approaching tool position
 could be used to derive tilt.
 [2] The list can of course be extended.
-[3] The multi-touch X driver is currently in the prototyping stage. At the
-time of writing (April 2009), the MT protocol is not yet merged, and the
-prototype implements finger matching, basic mouse support and two-finger
-scrolling. The project aims at improving the quality of current multi-touch
-functionality available in the Synaptics X driver, and in addition
-implement more advanced gestures.
+[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
 [4] See the section on event computation.
 [5] See the section on finger tracking.
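
For illustration, a minimal driver-side sketch of the two sequences shown
above (input, x and y are hypothetical driver variables), using the
in-kernel input_report_abs()/input_mt_sync() helpers:

	/* One contact still down: anonymous packet, then frame sync. */
	input_report_abs(input, ABS_MT_POSITION_X, x);
	input_report_abs(input, ABS_MT_POSITION_Y, y);
	input_mt_sync(input);	/* emits SYN_MT_REPORT */
	input_sync(input);	/* emits SYN_REPORT */

	/* All contacts lifted: an empty packet marks the zero-finger frame. */
	input_mt_sync(input);
	input_sync(input);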
index e4cbca5..839b21b 100644 (file)
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
        amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
-                       isolate - enable device isolation (each device, as far
-                                 as possible, will get its own protection
-                                 domain) [default]
-                       share - put every device behind one IOMMU into the
-                               same protection domain
                        fullflush - enable flushing of IO/TLB entries when
                                    they are unmapped. Otherwise they are
                                    flushed before they will be reused, which
@@ -1199,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
        libata.force=   [LIBATA] Force configurations.  The format is comma
                        separated list of "[ID:]VAL" where ID is
-                       PORT[:DEVICE].  PORT and DEVICE are decimal numbers
+                       PORT[.DEVICE].  PORT and DEVICE are decimal numbers
                        matching port, link or device.  Basically, it matches
                        the ATA ID string printed on console by libata.  If
                        the whole ID part is omitted, the last PORT and DEVICE
index 0e58b45..e8c8f4f 100644 (file)
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE:     return system time stamp generated in
 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
 following control message:
-    struct scm_timestamping {
-           struct timespec systime;
-           struct timespec hwtimetrans;
-           struct timespec hwtimeraw;
-    };
+
+struct scm_timestamping {
+       struct timespec systime;
+       struct timespec hwtimetrans;
+       struct timespec hwtimeraw;
+};
 
 recvmsg() can be used to get this control message for regular incoming
 packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
 SIOCSHWTSTAMP:
 
 Hardware time stamping must also be initialized for each device driver
-that is expected to do hardware time stamping. The parameter is:
+that is expected to do hardware time stamping. The parameter is defined in
+/include/linux/net_tstamp.h as:
 
 struct hwtstamp_config {
-    int flags;           /* no flags defined right now, must be zero */
-    int tx_type;         /* HWTSTAMP_TX_* */
-    int rx_filter;       /* HWTSTAMP_FILTER_* */
+       int flags;      /* no flags defined right now, must be zero */
+       int tx_type;    /* HWTSTAMP_TX_* */
+       int rx_filter;  /* HWTSTAMP_FILTER_* */
 };
 
 Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
        /* time stamp any incoming packet */
        HWTSTAMP_FILTER_ALL,
 
-        /* return value: time stamp all packets requested plus some others */
-        HWTSTAMP_FILTER_SOME,
+       /* return value: time stamp all packets requested plus some others */
+       HWTSTAMP_FILTER_SOME,
 
        /* PTP v1, UDP, any kind of event packet */
        HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
 
-        ...
+       /* for the complete list of values, please check
+        * the include file /include/linux/net_tstamp.h
+        */
 };
 
 
 DEVICE IMPLEMENTATION
 
 A driver which supports hardware time stamping must support the
-SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored
-in the skb with skb_hwtstamp_set().
+SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
+the actual values as described in the section on SIOCSHWTSTAMP.
+
+Time stamps for received packets must be stored in the skb. To get a pointer
+to the shared time stamp structure of the skb call skb_hwtstamps(). Then
+set the time stamps in the structure:
+
+struct skb_shared_hwtstamps {
+       /* hardware time stamp transformed into duration
+        * since arbitrary point in time
+        */
+       ktime_t hwtstamp;
+       ktime_t syststamp; /* hwtstamp transformed to system time base */
+};
 
 Time stamps for outgoing packets are to be generated as follows:
-- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware()
-  returns non-zero. If yes, then the driver is expected
-  to do hardware time stamping.
+- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero.
+  If yes, then the driver is expected to do hardware time stamping.
 - If this is possible for the skb and requested, then declare
-  that the driver is doing the time stamping by calling
-  skb_hwtstamp_tx_in_progress(). A driver not supporting
-  hardware time stamping doesn't do that. A driver must never
-  touch sk_buff::tstamp! It is used to store how time stamping
-  for an outgoing packets is to be done.
+  that the driver is doing the time stamping by setting the field
+  skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
+  to the associated skb for the next step and not free the skb. A driver
+  not supporting hardware time stamping doesn't do that. A driver must
+  never touch sk_buff::tstamp! It is used to store software generated
+  time stamps by the network subsystem.
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
   calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp and a handle to the device (necessary
-  to convert the hardware time stamp to system time). If obtaining
-  the hardware time stamp somehow fails, then the driver should
-  not fall back to software time stamping. The rationale is that
-  this would occur at a later time in the processing pipeline
-  than other software time stamping and therefore could lead
-  to unexpected deltas between time stamps.
-- If the driver did not call skb_hwtstamp_tx_in_progress(), then
+  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  adds the timestamps, therefore the original skb has to be freed now.
+  If obtaining the hardware time stamp somehow fails, then the driver
+  should not fall back to software time stamping. The rationale is that
+  this would occur at a later time in the processing pipeline than other
+  software time stamping and therefore could lead to unexpected deltas
+  between time stamps.
+- If the driver did not set skb_tx(skb)->in_progress, then
   dev_hard_start_xmit() checks whether software time stamping
   is wanted as fallback and potentially generates the time stamp.
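
For illustration, a minimal user-space sketch of the SIOCSHWTSTAMP request
described above (the interface name "eth0" and the open socket fd "sock"
are assumptions; error handling trimmed), using <linux/net_tstamp.h>,
<linux/sockios.h> and <net/if.h>:

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	/* On success the driver writes back the values it actually uses. */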
index 5effa5b..e213f45 100644 (file)
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
  - It cannot contain any "trivial" fixes in it (spelling changes,
    whitespace cleanups, etc).
  - It must follow the Documentation/SubmittingPatches rules.
- - It or an equivalent fix must already exist in Linus' tree.  Quote the
-   respective commit ID in Linus' tree in your patch submission to -stable.
+ - It or an equivalent fix must already exist in Linus' tree (upstream).
 
 
 Procedure for submitting patches to the -stable tree:
 
  - Send the patch, after verifying that it follows the above rules, to
-   stable@kernel.org.
- - To have the patch automatically included in the stable tree, add the
-   the tag
+   stable@kernel.org.  You must note the upstream commit ID in the changelog
+   of your submission.
+ - To have the patch automatically included in the stable tree, add the tag
      Cc: stable@kernel.org
    in the sign-off area. Once the patch is merged it will be applied to
    the stable tree without anything else needing to be done by the author
index 5b42290..1838875 100644 (file)
@@ -485,8 +485,8 @@ S:  Maintained
 F:     drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M:     Nicolas Boichat <nicolas@boichat.ch>
-L:     mactel-linux-devel@lists.sourceforge.net
+M:     Henrik Rydberg <rydberg@euromail.se>
+L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     drivers/hwmon/applesmc.c
 
@@ -1960,7 +1960,7 @@ F:        lib/kobj*
 
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
-L:     dri-devel@lists.sourceforge.net
+L:     dri-devel@lists.freedesktop.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
 S:     Maintained
 F:     drivers/gpu/drm/
@@ -4791,12 +4791,11 @@ F:      drivers/s390/crypto/
 
 S390 ZFCP DRIVER
 M:     Christof Schmitt <christof.schmitt@de.ibm.com>
-M:     Martin Peschke <mp3@de.ibm.com>
+M:     Swen Schillig <swen@vnet.ibm.com>
 M:     linux390@de.ibm.com
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
-F:     Documentation/s390/zfcpdump.txt
 F:     drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
index 9754615..fa1db90 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 34
-EXTRAVERSION = -rc4
-NAME = Man-Eating Seals of Antiquity
+EXTRAVERSION = -rc5
+NAME = Sheep on Meth
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 7f36d00..feb988a 100644 (file)
 
 #define kmap_prot              PAGE_KERNEL
 
-#define flush_cache_kmaps()    flush_cache_all()
+#define flush_cache_kmaps() \
+       do { \
+               if (cache_is_vivt()) \
+                       flush_cache_all(); \
+       } while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
index c019949..c4b2ea3 100644 (file)
@@ -18,6 +18,7 @@ enum km_type {
        KM_IRQ1,
        KM_SOFTIRQ0,
        KM_SOFTIRQ1,
+       KM_L1_CACHE,
        KM_L2_CACHE,
        KM_TYPE_NR
 };
index bf65e9f..47f023a 100644 (file)
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment.  */
 #define VFP_MAGIC              0x56465001
-#define VFP_STORAGE_SIZE       152
-#else
-#define VFP_MAGIC              0x56465002
-#define VFP_STORAGE_SIZE       144
-#endif
 
 struct vfp_sigframe
 {
        unsigned long           magic;
        unsigned long           size;
-       union vfp_state         storage;
-};
+       struct user_vfp         ufp;
+       struct user_vfp_exc     ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ *  8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ *  4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE       sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@ struct aux_sigframe {
 #ifdef CONFIG_IWMMXT
        struct iwmmxt_sigframe  iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved.  */
+#ifdef CONFIG_VFP
        struct vfp_sigframe     vfp;
 #endif
        /* Something that isn't a valid magic number for any coprocessor.  */
index df95e05..05ac4b0 100644 (file)
@@ -83,11 +83,21 @@ struct user{
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
  */
 struct user_vfp {
        unsigned long long fpregs[32];
        unsigned long fpscr;
 };
 
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+       unsigned long   fpexc;
+       unsigned long   fpinst;
+       unsigned long   fpinst2;
+};
+
 #endif /* _ARM_USER_H */
index e7714f3..907d5a6 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/vfp.h>
 
 #include "ptrace.h"
 #include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 #endif
 
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *h = &thread->vfpstate.hard;
+       const unsigned long magic = VFP_MAGIC;
+       const unsigned long size = VFP_STORAGE_SIZE;
+       int err = 0;
+
+       vfp_sync_hwstate(thread);
+       __put_user_error(magic, &frame->magic, err);
+       __put_user_error(size, &frame->size, err);
+
+       /*
+        * Copy the floating point registers. There can be unused
+        * registers; see asm/hwcap.h for details.
+        */
+       err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+                             sizeof(h->fpregs));
+       /*
+        * Copy the status and control register.
+        */
+       __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+       /*
+        * Copy the exception registers.
+        */
+       __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+       __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+       __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+       return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *h = &thread->vfpstate.hard;
+       unsigned long magic;
+       unsigned long size;
+       unsigned long fpexc;
+       int err = 0;
+
+       __get_user_error(magic, &frame->magic, err);
+       __get_user_error(size, &frame->size, err);
+
+       if (err)
+               return -EFAULT;
+       if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+               return -EINVAL;
+
+       /*
+        * Copy the floating point registers. There can be unused
+        * registers; see asm/hwcap.h for details.
+        */
+       err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+                               sizeof(h->fpregs));
+       /*
+        * Copy the status and control register.
+        */
+       __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+       /*
+        * Sanitise and restore the exception registers.
+        */
+       __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+       /* Ensure the VFP is enabled. */
+       fpexc |= FPEXC_EN;
+       /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+       fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+       h->fpexc = fpexc;
+
+       __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+       __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+       if (!err)
+               vfp_flush_hwstate(thread);
+
+       return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
  * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
  */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
                err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//     if (err == 0)
-//             err |= vfp_restore_state(&sf->aux.vfp);
+       if (err == 0)
+               err |= restore_vfp_context(&aux->vfp);
 #endif
 
        return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//     if (err == 0)
-//             err |= vfp_save_state(&sf->aux.vfp);
+       if (err == 0)
+               err |= preserve_vfp_context(&aux->vfp);
 #endif
        __put_user_error(0, &aux->end_magic, err);
 
index 027dd57..d400455 100644 (file)
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261)        += at91sam9261.o at91sam926x_time.o at91sam9261_d
 obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9RL)  += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o  sam9_smc.o
- obj-$(CONFIG_ARCH_AT91SAM9G45)        += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)    += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT572D940HF)  += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)     += at91x40.o at91x40_time.o
index 9fcbd6c..9c5b48e 100644 (file)
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
        orr     r3, r3, #(1 << 29)              /* bit 29 always set */
        str     r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
 
-       wait_pllalock
-
        /* Save PLLB setting and disable it */
        ldr     r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
        str     r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
        mov     r3, #AT91_PMC_PLLCOUNT
        str     r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 
-       wait_pllblock
-
        /* Turn off the main oscillator */
        ldr     r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
        bic     r3, r3, #AT91_PMC_MOSCEN
index 8bca4de..f55fa10 100644 (file)
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (page_address(to) != NULL)
-#endif
-               __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+       __cpuc_flush_dcache_area(kto, PAGE_SIZE);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
 }
index 1351edc..13fa536 100644 (file)
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                                vaddr += offset;
                                op(vaddr, len, dir);
                                kunmap_high(page);
+                       } else if (cache_is_vipt()) {
+                               pte_t saved_pte;
+                               vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                               op(vaddr + offset, len, dir);
+                               kunmap_high_l1_vipt(page, saved_pte);
                        }
                } else {
                        vaddr = page_address(page) + offset;
index e34f095..c6844cb 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-       void *addr = page_address(page);
-
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernels mapping.
         */
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (addr)
-#endif
-               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+       if (!PageHighMem(page)) {
+               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+       } else {
+               void *addr = kmap_high_get(page);
+               if (addr) {
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high(page);
+               } else if (cache_is_vipt()) {
+                       pte_t saved_pte;
+                       addr = kmap_high_l1_vipt(page, &saved_pte);
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high_l1_vipt(page, saved_pte);
+               }
+       }
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
index 2be1ec7..77b030f 100644 (file)
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
        if (kvaddr >= (void *)FIXADDR_START) {
-               __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+               if (cache_is_vivt())
+                       __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately.  So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed.  If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
+       if (!in_interrupt())
+               preempt_disable();
+
+       raw_local_irq_save(flags);
+       (*depth)++;
+       if (pte_val(*ptep) == pte_val(pte)) {
+               *saved_pte = pte;
+       } else {
+               *saved_pte = *ptep;
+               set_pte_ext(ptep, pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
+       BUG_ON(pte_val(*ptep) != pte_val(pte));
+       BUG_ON(*depth <= 0);
+
+       raw_local_irq_save(flags);
+       (*depth)--;
+       if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+               set_pte_ext(ptep, saved_pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       if (!in_interrupt())
+               preempt_enable();
+}
+
+#endif  /* CONFIG_CPU_CACHE_VIPT */
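
For illustration, a minimal caller sketch of the pair defined above (page
is a hypothetical highmem page; the cache operation mirrors the flush.c
hunk elsewhere in this merge):

	pte_t saved_pte;
	void *vaddr;

	vaddr = kmap_high_l1_vipt(page, &saved_pte);
	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
	kunmap_high_l1_vipt(page, saved_pte);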
index 4223d08..241c24a 100644 (file)
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
        pgd_t *pgd;
        int i;
 
-       if (current->mm && current->mm->pgd)
-               pgd = current->mm->pgd;
-       else
-               pgd = init_mm.pgd;
+       /*
+        * We need to access to user-mode page tables here. For kernel threads
+        * we don't have any user-mode mappings so we use the context that we
+        * "borrowed".
+        */
+       pgd = current->active_mm->pgd;
 
        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
index a420cb9..315a540 100644 (file)
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
-       /*
-        * On SMP systems, the VFP state is automatically saved at every
-        * context switch. We mark the thread VFP state as belonging to a
-        * non-existent CPU so that the saved one will be reloaded when
-        * needed.
-        */
-       thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
 void vfp_sync_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
                last_VFP_context[cpu] = NULL;
        }
 
+#ifdef CONFIG_SMP
+       /*
+        * For SMP we still have to take care of the case where the thread
+        * migrates to another CPU and then back to the original CPU on which
+        * the last VFP user is still the same thread. Mark the thread VFP
+        * state as belonging to a non-existent CPU so that the saved one will
+        * be reloaded in the above case.
+        */
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
        put_cpu();
 }
-#endif
 
 #include <linux/smp.h>
 
index 73c5c2b..7f3c0a2 100644 (file)
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       long n, base;
+       long base;
+       unsigned long n;
        unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                        offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
        base = memslot->base_gfn / BITS_PER_LONG;
 
        for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
 {
        int r;
-       int n;
+       unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots->memslots[log->slot];
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
index 88b7af2..d9d2ed6 100644 (file)
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+       __asm__ __volatile__("subl %2,%1; seq %0"
+                            : "=d" (c), "+m" (*v)
+                            : "id" (i));
        return c != 0;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+       __asm__ __volatile__("addl %2,%1; smi %0"
+                            : "=d" (c), "+m" (*v)
+                            : "id" (i));
        return c != 0;
 }
 
index ef22938..01a8716 100644 (file)
@@ -212,5 +212,10 @@ struct mcf_platform_uart {
 #define        MCFUART_URF_RXS         0xc0            /* Receiver status */
 #endif
 
+#if defined(CONFIG_M5272)
+#define MCFUART_TXFIFOSIZE     25
+#else
+#define MCFUART_TXFIFOSIZE     1
+#endif
 /****************************************************************************/
 #endif /* mcfuart_h */
index 1320eaa..a29dd74 100644 (file)
@@ -17,13 +17,11 @@ struct sigcontext {
 #ifndef __uClinux__
 # ifdef __mcoldfire__
        unsigned long  sc_fpregs[2][2]; /* room for two fp registers */
-       unsigned long  sc_fpcntl[3];
-       unsigned char  sc_fpstate[16+6*8];
 # else
        unsigned long  sc_fpregs[2*3];  /* room for two fp registers */
+# endif
        unsigned long  sc_fpcntl[3];
        unsigned char  sc_fpstate[216];
-# endif
 #endif
 };
 
index ce404bc..1404257 100644 (file)
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x)                := $(call cc-option,-mcpu=5208,-m5200)
 cflags-$(CONFIG_M523x)         := $(call cc-option,-mcpu=523x,-m5307)
 cflags-$(CONFIG_M5249)         := $(call cc-option,-mcpu=5249,-m5200)
 cflags-$(CONFIG_M5271)         := $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272)         := $(call cc-option,-mcpu=5271,-m5200)
+cflags-$(CONFIG_M5272)         := $(call cc-option,-mcpu=5272,-m5307)
 cflags-$(CONFIG_M5275)         := $(call cc-option,-mcpu=5275,-m5307)
 cflags-$(CONFIG_M528x)         := $(call cc-option,-m528x,-m5307)
 cflags-$(CONFIG_M5307)         := $(call cc-option,-m5307,-m5200)
index 56043ad..aff6f57 100644 (file)
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal)
        trap #0
 
 ENTRY(ret_from_user_rt_signal)
-       move #__NR_rt_sigreturn,%d0
+       movel #__NR_rt_sigreturn,%d0
        trap #0
 
index 1143f77..6f22970 100644 (file)
@@ -107,7 +107,6 @@ void init_IRQ(void)
        _ramvec[vba+CPMVEC_PIO_PC7]     = inthandler;  /* pio - pc7 */
        _ramvec[vba+CPMVEC_PIO_PC6]     = inthandler;  /* pio - pc6 */
        _ramvec[vba+CPMVEC_TIMER3]      = inthandler;  /* timer 3 */
-       _ramvec[vba+CPMVEC_RISCTIMER]   = inthandler;  /* reserved */
        _ramvec[vba+CPMVEC_PIO_PC5]     = inthandler;  /* pio - pc5 */
        _ramvec[vba+CPMVEC_PIO_PC4]     = inthandler;  /* pio - pc4 */
        _ramvec[vba+CPMVEC_RESERVED2]   = inthandler;  /* reserved */
index 25da07f..604af29 100644 (file)
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
-       int r, n;
+       int r;
+       unsigned long n;
 
        mutex_lock(&kvm->slots_lock);
 
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
 
index 4a76d94..533f357 100644 (file)
@@ -29,6 +29,7 @@ struct vdso_data {
        __u32 tz_minuteswest;           /* Minutes west of Greenwich    0x30 */
        __u32 tz_dsttime;               /* Type of dst correction       0x34 */
        __u32 ectg_available;
+       __u32 ntp_mult;                 /* NTP adjusted multiplier      0x3C */
 };
 
 struct vdso_per_cpu_data {
index 08db736..a094089 100644 (file)
@@ -61,6 +61,7 @@ int main(void)
        DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
        DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
        DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+       DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
        DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
        DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
        /* constants used by the vdso */
index b354427..c56d3f5 100644 (file)
@@ -256,6 +256,9 @@ restore_registers:
        lghi    %r2,0
        brasl   %r14,arch_set_page_states
 
+       /* Reinitialize the channel subsystem */
+       brasl   %r14,channel_subsystem_reinit
+
        /* Return 0 */
        lmg     %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
        lghi    %r2,0
index fba6dec..d906bf1 100644 (file)
@@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
        vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+       vdso_data->ntp_mult = mult;
        smp_wmb();
        ++vdso_data->tb_update_count;
 }
index 4a98909..9696439 100644 (file)
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,2f
        ahi     %r0,-1
-2:     mhi     %r0,1000                        /* cyc2ns(clock,cycle_delta) */
+2:     ms      %r0,__VDSO_NTP_MULT(%r5)        /* cyc2ns(clock,cycle_delta) */
        lr      %r2,%r0
-       lhi     %r0,1000
+       l       %r0,__VDSO_NTP_MULT(%r5)
        ltr     %r1,%r1
        mr      %r0,%r0
        jnm     3f
-       ahi     %r0,1000
+       a       %r0,__VDSO_NTP_MULT(%r5)
 3:     alr     %r0,%r2
        srdl    %r0,12
        al      %r0,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,12f
        ahi     %r0,-1
-12:    mhi     %r0,1000                        /* cyc2ns(clock,cycle_delta) */
+12:    ms      %r0,__VDSO_NTP_MULT(%r5)        /* cyc2ns(clock,cycle_delta) */
        lr      %r2,%r0
-       lhi     %r0,1000
+       l       %r0,__VDSO_NTP_MULT(%r5)
        ltr     %r1,%r1
        mr      %r0,%r0
        jnm     13f
-       ahi     %r0,1000
+       a       %r0,__VDSO_NTP_MULT(%r5)
 13:    alr     %r0,%r2
        srdl    %r0,12
        al      %r0,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
index ad8acfc..2d36331 100644 (file)
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
        sl      %r1,__VDSO_XTIME_STAMP+4(%r5)
        brc     3,3f
        ahi     %r0,-1
-3:     mhi     %r0,1000                        /* cyc2ns(clock,cycle_delta) */
+3:     ms      %r0,__VDSO_NTP_MULT(%r5)        /* cyc2ns(clock,cycle_delta) */
        st      %r0,24(%r15)
-       lhi     %r0,1000
+       l       %r0,__VDSO_NTP_MULT(%r5)
        ltr     %r1,%r1
        mr      %r0,%r0
        jnm     4f
-       ahi     %r0,1000
+       a       %r0,__VDSO_NTP_MULT(%r5)
 4:     al      %r0,24(%r15)
        srdl    %r0,12
        al      %r0,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
index 49106c6..f404678 100644 (file)
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
        stck    48(%r15)                        /* Store TOD clock */
        lg      %r1,48(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
-       mghi    %r1,1000
+       msgf    %r1,__VDSO_NTP_MULT(%r5)        /*  * NTP adjustment */
        srlg    %r1,%r1,12                      /* cyc2ns(clock,cycle_delta) */
        alg     %r1,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
        lg      %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
        stck    48(%r15)                        /* Store TOD clock */
        lg      %r1,48(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
-       mghi    %r1,1000
+       msgf    %r1,__VDSO_NTP_MULT(%r5)        /*  * NTP adjustment */
        srlg    %r1,%r1,12                      /* cyc2ns(clock,cycle_delta) */
        alg     %r1,__VDSO_XTIME_NSEC(%r5)      /*  + xtime */
        lg      %r0,__VDSO_XTIME_SEC(%r5)
index f873e75..36ee674 100644 (file)
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
        stck    48(%r15)                        /* Store TOD clock */
        lg      %r1,48(%r15)
        sg      %r1,__VDSO_XTIME_STAMP(%r5)     /* TOD - cycle_last */
-       mghi    %r1,1000
+       msgf    %r1,__VDSO_NTP_MULT(%r5)        /*  * NTP adjustment */
        srlg    %r1,%r1,12                      /* cyc2ns(clock,cycle_delta) */
        alg     %r1,__VDSO_XTIME_NSEC(%r5)      /*  + xtime.tv_nsec */
        lg      %r0,__VDSO_XTIME_SEC(%r5)       /* xtime.tv_sec */
index 6db5136..9908d47 100644 (file)
@@ -37,6 +37,9 @@ config SPARC64
        def_bool 64BIT
        select ARCH_SUPPORTS_MSI
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_GRAPH_FP_TEST
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_LMB
index 9d3c889..1b4a831 100644 (file)
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
        bool "D-cache flush debugging"
        depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-       bool "Stack Overflow Detection Support"
-
 config MCOUNT
        bool
        depends on SPARC64
-       depends on STACK_DEBUG || FUNCTION_TRACER
+       depends on FUNCTION_TRACER
        default y
 
 config FRAME_POINTER
index 926397d..050ef35 100644 (file)
@@ -17,7 +17,7 @@ typedef struct {
        unsigned int    __nmi_count;
        unsigned long   clock_tick;     /* %tick's per second */
        unsigned long   __pad;
-       unsigned int    __pad1;
+       unsigned int    irq0_irqs;
        unsigned int    __pad2;
 
        /* Dcache line 2, rarely used */
index 8b49bf9..bfa1ea4 100644 (file)
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-       unsigned long flags = __raw_local_save_flags();
-
-       raw_local_irq_disable();
+       unsigned long flags, tmp;
+
+       /* Disable interrupts to PIL_NORMAL_MAX unless we are
+        * already using PIL_NMI, in which case PIL_NMI is retained.
+        *
+        * The only values we ever program into the %pil are 0,
+        * PIL_NORMAL_MAX and PIL_NMI.
+        *
+        * Since PIL_NMI is the largest %pil value and all bits are
+        * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+        * actually is.
+        */
+       __asm__ __volatile__(
+               "rdpr   %%pil, %0\n\t"
+               "or     %0, %2, %1\n\t"
+               "wrpr   %1, 0x0, %%pil"
+               : "=r" (flags), "=r" (tmp)
+               : "i" (PIL_NORMAL_MAX)
+               : "memory"
+       );
 
        return flags;
 }
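What the new asm block computes, as a C sketch (read_pil()/write_pil() are hypothetical stand-ins for the rdpr/wrpr instructions): because PIL_NMI is 0xf with all bits set, or-ing PIL_NORMAL_MAX into an NMI-level %pil leaves it at PIL_NMI, so an NMI that fires between the read and the write never has its level lowered behind its back.

        unsigned long flags = read_pil();  /* rdpr %pil                  */
        write_pil(flags | PIL_NORMAL_MAX); /* or + wrpr in one asm block */
        return flags;                      /* old level, for the restore */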
index 9e2d944..4827a3a 100644
@@ -111,7 +111,7 @@ struct thread_info {
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE         0x4000000
+#define PREEMPT_ACTIVE         0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
index c631614..0c2dc1f 100644
@@ -13,6 +13,14 @@ extra-y     += init_task.o
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y              += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB)        += kgdb_$(BITS).o
 
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE)     += stacktrace.o
index 9103a56..03ab022 100644
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-       static u32 call;
+       u32 call;
        s32 off;
 
        off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
        return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       u32 old, new;
+
+       old = *(u32 *) &ftrace_graph_call;
+       new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+       return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       u32 old, new;
+
+       old = *(u32 *) &ftrace_graph_call;
+       new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+       return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+                                   unsigned long self_addr,
+                                   unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long) &return_to_handler;
+       struct ftrace_graph_ent trace;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return parent + 8UL;
+
+       if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+                                    frame_pointer) == -EBUSY)
+               return parent + 8UL;
+
+       trace.func = self_addr;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               return parent + 8UL;
+       }
+
+       return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
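Both graph-caller toggles above patch the instruction at the ftrace_graph_call site by rebuilding a sparc "call" opcode with ftrace_call_replace(). The hunk further up shows only the start of that helper (note it also drops a bogus `static` from the local variable, which had made the helper unsafe to run concurrently); the remainder presumably follows the sparc instruction format, major op 01 plus a 30-bit signed word displacement:

        static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
        {
                u32 call;
                s32 off;

                off = ((s32)addr - (s32)ip);         /* byte displacement */
                call = 0x40000000 | ((u32)off >> 2); /* word displacement,
                                                        "call" major op   */
                return call;
        }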
index e1cbdb9..830d70a 100644
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -45,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS      (IMAP_INR + 1)
 
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
+
+       /* The only reference we store to the IRQ bucket is
+        * by physical address, which kmemleak can't see; tell
+        * it explicitly that this object is not a leak and
+        * should be scanned.
+        */
+       kmemleak_not_leak(bucket);
+
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));
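kmemleak decides reachability by scanning memory for pointers; an object referenced only through its physical address therefore looks leaked. The general annotation pattern, as a sketch (register_by_phys() is a hypothetical stand-in for code that stores only __pa(obj)):

        obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
        if (!obj)
                return 0;
        kmemleak_not_leak(obj);      /* not a leak, but do scan it   */
        register_by_phys(__pa(obj)); /* only a non-pointer reference */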
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-       if (orig_sp < sp ||
-           orig_sp > (sp + THREAD_SIZE)) {
-               sp += THREAD_SIZE - 192 - STACK_BIAS;
-               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-       }
-
-       return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
index f5a0fd4..0a2bd0f 100644
@@ -5,6 +5,7 @@
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
        unsigned long flags;
 
index 5247283..53dfb92 100644
@@ -61,4 +61,23 @@ check_magic:
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+       if (orig_sp < sp ||
+           orig_sp > (sp + THREAD_SIZE)) {
+               sp += THREAD_SIZE - 192 - STACK_BIAS;
+               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+       }
+
+       return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
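With the two helpers moved here from irq_64.c, the NMI path can share the hardirq-stack switch with handler_irq(). The calling pattern, exactly as the perfctr_irq() hunk below uses it:

        void *orig_sp;

        orig_sp = set_hardirq_stack();  /* hop onto the per-cpu IRQ stack,
                                           unless %sp is already inside it */
        /* ... handler work runs on the dedicated stack ... */
        restore_hardirq_stack(orig_sp); /* put the original %sp back */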
index b287b62..a4bd7ba 100644
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
        unsigned int sum, touched = 0;
-       int cpu = smp_processor_id();
+       void *orig_sp;
 
        clear_softint(1 << irq);
 
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
        nmi_enter();
 
+       orig_sp = set_hardirq_stack();
+
        if (notify_die(DIE_NMI, "nmi", regs, 0,
                       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                touched = 1;
        else
                pcr_ops->write(PCR_PIC_PRIV);
 
-       sum = kstat_irqs_cpu(0, cpu);
+       sum = local_cpu_data().irq0_irqs;
        if (__get_cpu_var(nmi_touch)) {
                __get_cpu_var(nmi_touch) = 0;
                touched = 1;
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                pcr_ops->write(pcr_enable);
        }
 
+       restore_hardirq_stack(orig_sp);
+
        nmi_exit();
 }
 
index b775658..8a00058 100644
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
                struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
                if (!rp) {
-                       prom_printf("Cannot allocate IOMMU resource.\n");
-                       prom_halt();
+                       pr_info("%s: Cannot allocate IOMMU resource.\n",
+                               pbm->name);
+                       return;
                }
                rp->name = "IOMMU";
                rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
                rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
                rp->flags = IORESOURCE_BUSY;
-               request_resource(&pbm->mem_space, rp);
+               if (request_resource(&pbm->mem_space, rp)) {
+                       pr_info("%s: Unable to request IOMMU resource.\n",
+                               pbm->name);
+                       kfree(rp);
+               }
        }
 }
 
index 2d94e7a..c4a6a50 100644
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
 
index 83f1873..090b9e9 100644
@@ -130,7 +130,17 @@ rtrap_xcall:
                 nop
                call                    trace_hardirqs_on
                 nop
-               wrpr                    %l4, %pil
+               /* Do not actually set the %pil here.  We will do that
+                * below after we clear PSTATE_IE in the %pstate register.
+                * If we re-enable interrupts here, we can recurse down
+                * the hardirq stack potentially endlessly, causing a
+                * stack overflow.
+                *
+                * It is tempting to put this test and trace_hardirqs_on
+                * call at the 'rt_continue' label, but that will not work,
+                * as that path is hit unconditionally and we do not want
+                * to execute this in NMI return paths, for example.
+                */
 #endif
 rtrap_no_irq_enable:
                andcc                   %l1, TSTATE_PRIV, %l3
index 4c53345..b6a2b8f 100644
@@ -22,6 +22,7 @@
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
                      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
        generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
        generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
        struct mm_struct *mm;
        unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
                      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 }
index 67e1651..c7bbe6c 100644
@@ -35,6 +35,7 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
        irq_enter();
 
+       local_cpu_data().irq0_irqs++;
        kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
        if (unlikely(!evt->event_handler)) {
index 837dfc2..9da57f0 100644
@@ -2203,27 +2203,6 @@ void dump_stack(void)
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-                                 struct reg_window *rw)
-{
-       unsigned long rw_addr = (unsigned long) rw;
-       unsigned long thread_base, thread_end;
-
-       if (rw_addr < PAGE_OFFSET) {
-               if (task != &init_task)
-                       return 0;
-       }
-
-       thread_base = (unsigned long) task_stack_page(task);
-       thread_end = thread_base + sizeof(union thread_union);
-       if (rw_addr >= thread_base &&
-           rw_addr < thread_end &&
-           !(rw_addr & 0x7UL))
-               return 1;
-
-       return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
        unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
        show_regs(regs);
        add_taint(TAINT_DIE);
        if (regs->tstate & TSTATE_PRIV) {
+               struct thread_info *tp = current_thread_info();
                struct reg_window *rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);
 
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
                 * find some badly aligned kernel stack.
                 */
                while (rw &&
-                      count++ < 30&&
-                      is_kernel_stack(current, rw)) {
+                      count++ < 30 &&
+                      kstack_valid(tp, (unsigned long) rw)) {
                        printk("Caller[%016lx]: %pS\n", rw->ins[7],
                               (void *) rw->ins[7]);
 
index ebce430..c752c4c 100644
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
        unsigned int tmp;
 
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
-               die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+               die_if_kernel("Byte sized unaligned access?!?!", regs);
 
                /* GCC should never warn that control reaches the end
                 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        enum direction dir = decode_direction(insn);
-       int size = decode_access_size(insn);
+       int size = decode_access_size(regs, insn);
        int orig_asi, asi;
 
        current_thread_info()->kern_una_regs = regs;
index 4e59925..0c1e678 100644
@@ -46,11 +46,16 @@ SECTIONS
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
                *(.gnu.warning)
        } = 0
        _etext = .;
 
        RO_DATA(PAGE_SIZE)
+
+       /* Start of data section */
+       _sdata = .;
+
        .data1 : {
                *(.data1)
        }
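The IRQENTRY_TEXT addition pairs with the __irq_entry annotations sprinkled through the sparc hunks above: annotated handlers land in .irqentry.text, which the function-graph tracer uses to recognize interrupt entry points. The annotation itself is roughly the following (quoted from memory of linux/ftrace.h in this era, so treat it as a sketch):

        #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        #define __irq_entry __attribute__((__section__(".irqentry.text")))
        #else
        #define __irq_entry
        #endif

        void __irq_entry handler_irq(int irq, struct pt_regs *regs);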
index 24b8b12..3ad6cbd 100644
@@ -7,26 +7,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code.  GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE    4096            /* lets hope this is enough */
-
-       .data
-       .align          8
-panicstring:
-       .asciz          "Stack overflow\n"
-       .align          8
-ovstack:
-       .skip           OVSTACKSIZE
-#endif
        .text
        .align          32
        .globl          _mcount
@@ -35,84 +20,48 @@ ovstack:
        .type           mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-       /*
-        * Check whether %sp is dangerously low.
-        */
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       srl             %g1, 1, %g3
-       add             %g3, 1, %g3
-       sllx            %g3, 8, %g3                     ! each fpregs frame is 256b
-       add             %g3, 192, %g3
-       add             %g6, %g3, %g3                   ! where does task_struct+frame end?
-       sub             %g3, STACK_BIAS, %g3
-       cmp             %sp, %g3
-       bg,pt           %xcc, 1f
-        nop
-       lduh            [%g6 + TI_CPU], %g1
-       sethi           %hi(hardirq_stack), %g3
-       or              %g3, %lo(hardirq_stack), %g3
-       sllx            %g1, 3, %g1
-       ldx             [%g3 + %g1], %g7
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       bleu,pt         %xcc, 2f
-        sethi          %hi(THREAD_SIZE), %g3
-       add             %g7, %g3, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 1f
-2:      sethi          %hi(softirq_stack), %g3
-       or              %g3, %lo(softirq_stack), %g3
-       ldx             [%g3 + %g1], %g7
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       bleu,pt         %xcc, 3f
-        sethi          %hi(THREAD_SIZE), %g3
-       add             %g7, %g3, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 1f
-        nop
-       /* If we are already on ovstack, don't hop onto it
-        * again, we are already trying to output the stack overflow
-        * message.
-        */
-3:     sethi           %hi(ovstack), %g7               ! cant move to panic stack fast enough
-        or             %g7, %lo(ovstack), %g7
-       add             %g7, OVSTACKSIZE, %g3
-       sub             %g3, STACK_BIAS + 192, %g3
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 2f
-        cmp            %sp, %g3
-       bleu,pn         %xcc, 1f
-        nop
-2:     mov             %g3, %sp
-       sethi           %hi(panicstring), %g3
-       call            prom_printf
-        or             %g3, %lo(panicstring), %o0
-       call            prom_halt
-        nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-       mov             %o7, %o0
-       .globl          mcount_call
-mcount_call:
-       call            ftrace_stub
-        mov            %o0, %o7
+       /* Do nothing, the retl/nop below is all we need.  */
 #else
-       sethi           %hi(ftrace_trace_function), %g1
+       sethi           %hi(function_trace_stop), %g1
+       lduw            [%g1 + %lo(function_trace_stop)], %g2
+       brnz,pn         %g2, 2f
+        sethi          %hi(ftrace_trace_function), %g1
        sethi           %hi(ftrace_stub), %g2
        ldx             [%g1 + %lo(ftrace_trace_function)], %g1
        or              %g2, %lo(ftrace_stub), %g2
        cmp             %g1, %g2
        be,pn           %icc, 1f
-        mov            %i7, %o1
-       jmpl            %g1, %g0
-        mov            %o7, %o0
+        mov            %i7, %g3
+       save            %sp, -176, %sp
+       mov             %g3, %o1
+       jmpl            %g1, %o7
+        mov            %i7, %o0
+       ret
+        restore
        /* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       sethi           %hi(ftrace_graph_return), %g1
+       ldx             [%g1 + %lo(ftrace_graph_return)], %g3
+       cmp             %g2, %g3
+       bne,pn          %xcc, 5f
+        sethi          %hi(ftrace_graph_entry_stub), %g2
+       sethi           %hi(ftrace_graph_entry), %g1
+       or              %g2, %lo(ftrace_graph_entry_stub), %g2
+       ldx             [%g1 + %lo(ftrace_graph_entry)], %g1
+       cmp             %g1, %g2
+       be,pt           %xcc, 2f
+        nop
+5:     mov             %i7, %g2
+       mov             %fp, %g3
+       save            %sp, -176, %sp
+       mov             %g2, %l0
+       ba,pt           %xcc, ftrace_graph_caller
+        mov            %g3, %l1
+#endif
+2:
 #endif
 #endif
        retl
@@ -131,14 +80,50 @@ ftrace_stub:
        .globl          ftrace_caller
        .type           ftrace_caller,#function
 ftrace_caller:
-       mov             %i7, %o1
-       mov             %o7, %o0
+       sethi           %hi(function_trace_stop), %g1
+       mov             %i7, %g2
+       lduw            [%g1 + %lo(function_trace_stop)], %g1
+       brnz,pn         %g1, ftrace_stub
+        mov            %fp, %g3
+       save            %sp, -176, %sp
+       mov             %g2, %o1
+       mov             %g2, %l0
+       mov             %g3, %l1
        .globl          ftrace_call
 ftrace_call:
        call            ftrace_stub
-        mov            %o0, %o7
-       retl
+        mov            %i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl          ftrace_graph_call
+ftrace_graph_call:
+       call            ftrace_stub
         nop
+#endif
+       ret
+        restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .size           ftrace_graph_call,.-ftrace_graph_call
+#endif
+       .size           ftrace_call,.-ftrace_call
        .size           ftrace_caller,.-ftrace_caller
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       mov             %l0, %o0
+       mov             %i7, %o1
+       call            prepare_ftrace_return
+        mov            %l1, %o2
+       ret
+        restore        %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+       save            %sp, -176, %sp
+       call            ftrace_return_to_handler
+        mov            %fp, %o0
+       jmpl            %o0 + 8, %g0
+        restore
+END(return_to_handler)
+#endif
index 64cda95..7a656bd 100644
@@ -6,6 +6,7 @@
 #include "linux/irqreturn.h"
 #include "linux/kd.h"
 #include "linux/sched.h"
+#include "linux/slab.h"
 #include "chan_kern.h"
 #include "irq_kern.h"
 #include "irq_user.h"
index 06d6ccf..b6b1096 100644
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <sched.h>
 #include <linux/limits.h>
-#include <linux/slab.h>
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include "kern_constants.h"
index 59b4556..e790bc1 100644
@@ -626,7 +626,7 @@ ia32_sys_call_table:
        .quad stub32_sigreturn
        .quad stub32_clone              /* 120 */
        .quad sys_setdomainname
-       .quad sys_uname
+       .quad sys_newuname
        .quad sys_modify_ldt
        .quad compat_sys_adjtimex
        .quad sys32_mprotect            /* 125 */
index ba19ad4..86a0ff0 100644
@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
        struct list_head list;  /* for list of all protection domains */
        struct list_head dev_list; /* List of all devices in this domain */
        spinlock_t lock;        /* mostly used to lock the page table*/
+       struct mutex api_lock;  /* protect page tables in the iommu-api path */
        u16 id;                 /* the domain id written to the device table */
        int mode;               /* paging mode (0-6 levels) */
        u64 *pt_root;           /* page table root pointer */
index ba0eed8..b60f292 100644
 
 #ifndef __ASSEMBLY__
 #include <asm/hw_irq.h>
-#include <asm/kvm_para.h>
 
 /*G:030
  * But first, how does our Guest contact the Host to ask for privileged
  * operations?  There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism, though completely different hypercall
- * numbers. Seventeen hypercalls are available: the hypercall number is put in
- * the %eax register, and the arguments (when required) are placed in %ebx,
- * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts).  Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure.  This reflects Winston Churchill's
  * definition of a gentleman: "someone who is only rude intentionally".
-:*/
+ */
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3,
+      unsigned long arg4)
+{
+       /* "int" is the Intel instruction to trigger a trap. */
+       asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+                    /* The call in %eax (aka "a") might be overwritten */
+                    : "=a"(call)
+                      /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+                    : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+                      /* "memory" means this might write somewhere in memory.
+                       * This isn't true for all calls, but it's safe to tell
+                       * gcc that it might happen so it doesn't get clever. */
+                    : "memory");
+       return call;
+}
 
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
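With the KVM hypercall plumbing gone, each former kvm_hypercallN(call, ...) caller becomes a five-argument hcall() with the unused slots zero-padded, as the boot.c hunks below show:

        hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);        /* was kvm_hypercall0() */
        hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0); /* was kvm_hypercall1() */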
index f3dadb5..f854d89 100644
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
                return false;
 
        /* No device or no PCI device */
-       if (!dev || dev->bus != &pci_bus_type)
+       if (dev->bus != &pci_bus_type)
                return false;
 
        devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
        u32 tail, head;
        u8 *target;
 
+       WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = iommu->cmd_buf + tail;
        memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
        struct dma_ops_domain *dma_dom;
        u16 devid;
 
-       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+       for_each_pci_dev(dev) {
 
                /* Do we handle this device? */
                if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
        list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
                struct device *dev = dev_data->dev;
 
-               do_detach(dev);
+               __detach_device(dev);
                atomic_set(&dev_data->bind, 0);
        }
 
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
                return NULL;
 
        spin_lock_init(&domain->lock);
+       mutex_init(&domain->api_lock);
        domain->id = domain_id_alloc();
        if (!domain->id)
                goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
        free_pagetable(domain);
 
-       domain_id_free(domain->id);
-
-       kfree(domain);
+       protection_domain_free(domain);
 
        dom->priv = NULL;
 }
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
        iova  &= PAGE_MASK;
        paddr &= PAGE_MASK;
 
+       mutex_lock(&domain->api_lock);
+
        for (i = 0; i < npages; ++i) {
                ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
                if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
                paddr += PAGE_SIZE;
        }
 
+       mutex_unlock(&domain->api_lock);
+
        return 0;
 }
 
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
        iova  &= PAGE_MASK;
 
+       mutex_lock(&domain->api_lock);
+
        for (i = 0; i < npages; ++i) {
                iommu_unmap_page(domain, iova, PM_MAP_4k);
                iova  += PAGE_SIZE;
        }
 
        iommu_flush_tlb_pde(domain);
+
+       mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
index 42f5350..6360abf 100644
@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
-       if (checksum != 0)
+       if (checksum != 0) {
                /* ACPI table corrupt */
-               return -ENODEV;
+               amd_iommu_init_err = -ENODEV;
+               return 0;
+       }
 
        p += IVRS_HEADER_LENGTH;
 
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
        if (cmd_buf == NULL)
                return NULL;
 
-       iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+       iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
        return cmd_buf;
 }
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
                    &entry, sizeof(entry));
 
        amd_iommu_reset_cmd_buffer(iommu);
+       iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
        free_pages((unsigned long)iommu->cmd_buf,
-                  get_order(iommu->cmd_buf_size));
+                  get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
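CMD_BUFFER_UNINITIALIZED rides in bit 0 of cmd_buf_size, which is otherwise always clear since CMD_BUFFER_SIZE is 8192. The flag's lifecycle, pieced together from the hunks in this file and the WARN_ON added to amd_iommu.c above (a sketch):

        iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
        /* ... iommu_enable_command_buffer() clears the marker: */
        iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
        /* queueing a command too early now trips a warning: */
        WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
        /* real size computations must mask the flag off: */
        order = get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED));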
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
                                    h->mmio_phys);
 
                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-                       if (iommu == NULL)
-                               return -ENOMEM;
+                       if (iommu == NULL) {
+                               amd_iommu_init_err = -ENOMEM;
+                               return 0;
+                       }
+
                        ret = init_iommu_one(iommu, h);
-                       if (ret)
-                               return ret;
+                       if (ret) {
+                               amd_iommu_init_err = ret;
+                               return 0;
+                       }
                        break;
                default:
                        break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
        }
        WARN_ON(p != end);
 
-       amd_iommu_initialized = true;
-
        return 0;
 }
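acpi_table_parse() only reports whether a table was found and handled; a non-zero return from the handler cannot carry an errno back to the caller. Each handler therefore now returns 0 and parks the real error in amd_iommu_init_err, which amd_iommu_init() re-checks after every parse pass. The shape of the pattern, with placeholder names rather than the driver's own:

        static int __initdata my_init_err;

        static int __init my_parse_cb(struct acpi_table_header *table)
        {
                if (table_is_bad(table)) {     /* hypothetical check */
                        my_init_err = -ENODEV; /* remember the real error */
                        return 0;              /* keep acpi_table_parse happy */
                }
                return 0;
        }

        /* caller, after acpi_table_parse("IVRS", my_parse_cb): */
        ret = my_init_err;
        if (ret)
                goto free;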
 
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;
 
+       ret = amd_iommu_init_err;
+       if (ret)
+               goto out;
+
        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;
 
-       if (!amd_iommu_initialized)
+       if (amd_iommu_init_err) {
+               ret = amd_iommu_init_err;
                goto free;
+       }
 
        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;
 
+       if (amd_iommu_init_err) {
+               ret = amd_iommu_init_err;
+               goto free;
+       }
+
        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
        if (ret)
                goto free;
 
+       enable_iommus();
+
        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_notifier();
 
-       enable_iommus();
-
        if (iommu_pass_through)
                goto out;
 
@@ -1315,6 +1332,7 @@ out:
        return ret;
 
 free:
+       disable_iommus();
 
        amd_iommu_uninit_devices();
 
index 3704997..b5d8b0b 100644
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
        for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
                int bus;
                int dev_base, dev_limit;
+               u32 ctl;
 
                bus = bus_dev_ranges[i].bus;
                dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
                        gart_iommu_aperture = 1;
                        x86_init.iommu.iommu_init = gart_iommu_init;
 
-                       aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+                       ctl = read_pci_config(bus, slot, 3,
+                                             AMD64_GARTAPERTURECTL);
+
+                       /*
+                        * Before we do anything else, disable the GART. It may
+                        * still be enabled if we booted into a crash kernel here.
+                        * Reconfiguring the GART while it is enabled could have
+                        * unknown side-effects.
+                        */
+                       ctl &= ~GARTEN;
+                       write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+                       aper_order = (ctl >> 1) & 7;
                        aper_size = (32 * 1024 * 1024) << aper_order;
                        aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
                        aper_base <<= 25;
index a4849c1..ebd4c51 100644
@@ -27,7 +27,6 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
        hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-       x86_platform.iommu_shutdown();
-#endif
-
        crash_save_cpu(regs, safe_smp_processor_id());
 }
index e39e771..e1a93be 100644
@@ -14,6 +14,8 @@
 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
 #endif
 
+#include <linux/uaccess.h>
+
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp, char *log_lvl);
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n)
        get_bp(frame);
 
 #ifdef CONFIG_FRAME_POINTER
-       while (n--)
-               frame = frame->next_frame;
+       while (n--) {
+               if (probe_kernel_address(&frame->next_frame, frame))
+                       break;
+       }
 #endif
 
        return (unsigned long)frame;
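rewind_frame_pointer() used to chase frame->next_frame unconditionally, so a corrupted frame pointer oopsed inside the stack dumper itself. probe_kernel_address() (hence the new uaccess.h include) performs the load with a fault handler installed and returns non-zero instead of faulting; in general:

        unsigned long next;

        /* copies frame->next_frame into 'next'; -EFAULT on bad memory */
        if (probe_kernel_address(&frame->next_frame, next))
                break; /* sketch: unreadable frame, stop the walk */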
index 68cd24f..0f7f130 100644
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
 
                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
+
+       /* Flush the GART-TLB to remove stale entries */
+       k8_flush_garts();
 }
 
 /*
index 48aeee8..19a8906 100644
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                for_each_sp(pages, sp, parents, i) {
                        kvm_mmu_zap_page(kvm, sp);
                        mmu_pages_clear_parents(&parents);
+                       zapped++;
                }
-               zapped += pages.nr;
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
 
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
         */
 
        if (used_pages > kvm_nr_mmu_pages) {
-               while (used_pages > kvm_nr_mmu_pages) {
+               while (used_pages > kvm_nr_mmu_pages &&
+                       !list_empty(&kvm->arch.active_mmu_pages)) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       kvm_mmu_zap_page(kvm, page);
+                       used_pages -= kvm_mmu_zap_page(kvm, page);
                        used_pages--;
                }
+               kvm_nr_mmu_pages = used_pages;
                kvm->arch.n_free_mmu_pages = 0;
        }
        else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
-                       kvm_mmu_zap_page(kvm, sp);
+                       if (kvm_mmu_zap_page(kvm, sp))
+                               nn = bucket->first;
                }
        }
 }
index 445c594..2ba5820 100644
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_svm;
 
+       err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
-       if (!page) {
-               err = -ENOMEM;
+       if (!page)
                goto uninit;
-       }
 
-       err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
-               goto uninit;
+               goto free_page1;
 
        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
-               goto uninit;
-
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
+               goto free_page2;
 
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
-               goto uninit;
+               goto free_page3;
+
        svm->nested.hsave = page_address(hsave_page);
 
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
+
        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
        svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        return &svm->vcpu;
 
+free_page3:
+       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+       __free_page(page);
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
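The reordering above replaces a single catch-all uninit: target, which leaked whatever had already been allocated, with the kernel's usual unwind ladder: one label per allocation, freed in reverse order. The generic shape (all names hypothetical):

        a = alloc_a();
        if (!a)
                goto out;
        b = alloc_b();
        if (!b)
                goto free_a;
        c = alloc_c();
        if (!c)
                goto free_b;
        return 0;

        free_b:
                free_b_obj(b);
        free_a:
                free_a_obj(a);
        out:
                return -ENOMEM;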
index 686492e..bc933cf 100644
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
        } host_state;
        struct {
                int vm86_active;
-               u8 save_iopl;
+               ulong save_rflags;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       unsigned long rflags;
+       unsigned long rflags, save_rflags;
 
        rflags = vmcs_readl(GUEST_RFLAGS);
-       if (to_vmx(vcpu)->rmode.vm86_active)
-               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+               save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+               rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+       }
        return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (to_vmx(vcpu)->rmode.vm86_active)
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               to_vmx(vcpu)->rmode.save_rflags = rflags;
                rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+       }
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-       flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+       flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+       flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vmx->rmode.save_iopl
-               = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+       vmx->rmode.save_rflags = flags;
 
        flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
index 24cd0ee..3c4ca98 100644
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL) {
-               printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, kvm_read_cr0(vcpu));
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-               printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-               printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-                      "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
 
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-                       printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-                              "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
        if (cr4 & CR4_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
-                       printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-                              "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-               printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (cr4 & X86_CR4_VMXE) {
-               printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-                       printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
-                               printk(KERN_DEBUG
-                                      "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-                               printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-                                      "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits) {
-               printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-                      efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_paging(vcpu)
            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-               printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
-                       /* only 0 or all 1s can be written to IA32_MCi_CTL */
+                       /* only 0 or all 1s can be written to IA32_MCi_CTL;
+                        * some Linux kernels, though, clear bit 10 in bank 4
+                        * to work around a BIOS/GART TBL issue on AMD K8s.
+                        * Ignore that bit to avoid an uncaught #GP in the
+                        * guest.
+                        */
                        if ((offset & 0x3) == 0 &&
-                           data != 0 && data != ~(u64)0)
+                           data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r, n, i;
+       int r, i;
        struct kvm_memory_slot *memslot;
+       unsigned long n;
        unsigned long is_dirty = 0;
        unsigned long *dirty_bitmap = NULL;
 
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
 
        r = -ENOMEM;
        dirty_bitmap = vmalloc(n);
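Here `n` grows from int to unsigned long and the open-coded size computation moves behind kvm_dirty_bitmap_bytes(): with a large enough memslot, ALIGN(npages, BITS_PER_LONG) / 8 overflows an int. Assuming the new helper simply keeps the formula it replaces, it presumably reads:

        static inline unsigned long
        kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
        {
                return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
        }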
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_set_cr8(vcpu, kvm_run->cr8);
 
        if (vcpu->arch.pio.cur_count) {
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = complete_pio(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r)
                        goto out;
        }
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+       u32 desc_limit;
 
        old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                }
        }
 
-       if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+       desc_limit = get_desc_limit(&nseg_desc);
+       if (!nseg_desc.p ||
+           ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+            desc_limit < 0x2b)) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }
index 7e59dc1..2bdf628 100644
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
        local_irq_save(flags);
        if (lguest_data.hcall_status[next_call] != 0xFF) {
                /* Table full, so do normal hcall which will flush table. */
-               kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+               hcall(call, arg1, arg2, arg3, arg4);
        } else {
                lguest_data.hcalls[next_call].arg0 = call;
                lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
  * So, when we're in lazy mode, we call async_hcall() to store the call for
  * future processing:
  */
-static void lazy_hcall1(unsigned long call,
-                      unsigned long arg1)
+static void lazy_hcall1(unsigned long call, unsigned long arg1)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall1(call, arg1);
+               hcall(call, arg1, 0, 0, 0);
        else
                async_hcall(call, arg1, 0, 0, 0);
 }
 
 /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
 static void lazy_hcall2(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2)
+                       unsigned long arg1,
+                       unsigned long arg2)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall2(call, arg1, arg2);
+               hcall(call, arg1, arg2, 0, 0);
        else
                async_hcall(call, arg1, arg2, 0, 0);
 }
 
 static void lazy_hcall3(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2,
-                      unsigned long arg3)
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall3(call, arg1, arg2, arg3);
+               hcall(call, arg1, arg2, arg3, 0);
        else
                async_hcall(call, arg1, arg2, arg3, 0);
 }
 
 #ifdef CONFIG_X86_PAE
 static void lazy_hcall4(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2,
-                      unsigned long arg3,
-                      unsigned long arg4)
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+               hcall(call, arg1, arg2, arg3, arg4);
        else
                async_hcall(call, arg1, arg2, arg3, arg4);
 }
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
 :*/
 static void lguest_leave_lazy_mmu_mode(void)
 {
-       kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+       hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
        paravirt_leave_lazy_mmu();
 }
 
 static void lguest_end_context_switch(struct task_struct *next)
 {
-       kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+       hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
        paravirt_end_context_switch(next);
 }
 
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
        /* Keep the local copy up to date. */
        native_write_idt_entry(dt, entrynum, g);
        /* Tell Host about this new entry. */
-       kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
+       hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
 }
 
 /*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
        struct desc_struct *idt = (void *)desc->address;
 
        for (i = 0; i < (desc->size+1)/8; i++)
-               kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+               hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
 }
 
 /*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
        struct desc_struct *gdt = (void *)desc->address;
 
        for (i = 0; i < (desc->size+1)/8; i++)
-               kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
+               hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
 }
 
 /*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 {
        native_write_gdt_entry(dt, entrynum, desc, type);
        /* Tell Host about this new entry. */
-       kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
-                      dt[entrynum].a, dt[entrynum].b);
+       hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
+             dt[entrynum].a, dt[entrynum].b, 0);
 }
 
 /*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
        }
 
        /* Please wake us this far in the future. */
-       kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta);
+       hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
        return 0;
 }
 
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                /* A 0 argument shuts the clock down. */
-               kvm_hypercall0(LHCALL_SET_CLOCKEVENT);
+               hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
 /* STOP!  Until an interrupt comes in. */
 static void lguest_safe_halt(void)
 {
-       kvm_hypercall0(LHCALL_HALT);
+       hcall(LHCALL_HALT, 0, 0, 0, 0);
 }
 
 /*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
  */
 static void lguest_power_off(void)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"),
-                                       LGUEST_SHUTDOWN_POWEROFF);
+       hcall(LHCALL_SHUTDOWN, __pa("Power down"),
+             LGUEST_SHUTDOWN_POWEROFF, 0, 0);
 }
 
 /*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
  */
 static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF);
+       hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
        /* The hcall won't return, but to keep gcc happy, we're "done". */
        return NOTIFY_DONE;
 }
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
                len = sizeof(scratch) - 1;
        scratch[len] = '\0';
        memcpy(scratch, buf, len);
-       kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));
+       hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
 
        /* This routine returns the number of bytes actually written. */
        return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
  */
 static void lguest_restart(char *reason)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
+       hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
 }
 
 /*G:050
index 27eac0f..4f420c2 100644
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
         */
        movl $LHCALL_LGUEST_INIT, %eax
        movl $lguest_data - __PAGE_OFFSET, %ebx
-       .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+       int $LGUEST_TRAP_ENTRY
 
        /* Set up the initial stack so we can run C code. */
        movl $(init_thread_union+THREAD_SIZE),%esp
index a610ebe..2fbfe51 100644
@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                /* allow full data read from EC address space */
                if (obj_desc->field.region_obj->region.space_id ==
                        ACPI_ADR_SPACE_EC) {
-                       if (obj_desc->common_field.bit_length > 8)
-                               obj_desc->common_field.access_bit_width =
-                               ACPI_ROUND_UP(obj_desc->common_field.
-                                                       bit_length, 8);
+                       if (obj_desc->common_field.bit_length > 8) {
+                               unsigned width =
+                                       ACPI_ROUND_BITS_UP_TO_BYTES(
+                                       obj_desc->common_field.bit_length);
+                               // access_bit_width is u8, don't overflow it
+                               if (width > 8)
+                                       width = 8;
                                obj_desc->common_field.access_byte_width =
-                               ACPI_DIV_8(obj_desc->common_field.
-                                                       access_bit_width);
+                                                       width;
+                               obj_desc->common_field.access_bit_width =
+                                                       8 * width;
+                       }
                }
 
                ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
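
The hunk above rounds an EC field's bit length up to whole bytes and caps the byte width at 8: access_bit_width is a u8 and the widest ACPI access is 64 bits, so anything larger would overflow the field. A self-contained sketch of that clamp, where field_info and set_ec_access_width are made-up stand-ins for the ACPICA structures:

#include <stdint.h>
#include <stdio.h>

#define ROUND_BITS_UP_TO_BYTES(b) (((b) + 7u) / 8u)

struct field_info {
	uint32_t bit_length;
	uint8_t access_byte_width;
	uint8_t access_bit_width;	/* a u8, as in the ACPICA struct */
};

static void set_ec_access_width(struct field_info *f)
{
	if (f->bit_length > 8) {
		unsigned int width = ROUND_BITS_UP_TO_BYTES(f->bit_length);

		if (width > 8)	/* cap at 64 bits so 8 * width fits in a u8 */
			width = 8;
		f->access_byte_width = width;
		f->access_bit_width = 8 * width;
	}
}

int main(void)
{
	struct field_info f = { .bit_length = 256 };

	set_ec_access_width(&f);	/* 256 bits -> clamped to 8 bytes */
	printf("%u bytes / %u bits\n", f.access_byte_width, f.access_bit_width);
	return 0;
}
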
index 9f6cfac..228740f 100644
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct request_queue *q = qc->scsicmd->device->request_queue;
+       unsigned long flags;
 
        WARN_ON(!ap->ops->error_handler);
 
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
+       spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
+       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        }
 
        /* okay, this error is ours */
+       memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
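
The locking hunk above wraps blk_abort_request() in the request queue's lock, which that function expects to be held when invoked from outside the block layer's own timeout paths; the memset() hunk separately ensures the taskfile is fully initialized before ata_eh_read_log_10h() partially fills it. A toy pthread version of the locking shape, with request_queue, abort_request_locked and schedule_eh all invented for the demo:

#include <pthread.h>
#include <stdio.h>

struct request_queue {
	pthread_mutex_t lock;	/* plays the role of q->queue_lock */
	int aborted;
};

/* Like blk_abort_request(): documented to require the queue lock held. */
static void abort_request_locked(struct request_queue *q)
{
	q->aborted = 1;
}

static void schedule_eh(struct request_queue *q)
{
	pthread_mutex_lock(&q->lock);	/* take the lock the callee expects */
	abort_request_locked(q);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct request_queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	schedule_eh(&q);
	printf("aborted = %d\n", q.aborted);
	return 0;
}
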
index 3c3172d..4164dd2 100644
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
        PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
        PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
        PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
        PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
        PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
        PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
index d41331b..aa4248e 100644
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void)
        pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
        /* clear any possible error conditions */
        pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
-
-       intel_i830_setup_flush();
        return 0;
 }
 
@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = {
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
-       .chipset_flush          = intel_i830_chipset_flush,
 };
 
 static const struct agp_bridge_driver intel_850_driver = {
index c9bc896..90b199f 100644
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
 
        xoutb(0, REG_FLAGS1(iobase));   /* clear detectCMM */
        /* last check before exit */
-       if (!io_detect_cm4000(iobase, dev))
-               count = -ENODEV;
+       if (!io_detect_cm4000(iobase, dev)) {
+               rc = -ENODEV;
+               goto release_io;
+       }
 
        if (test_bit(IS_INVREV, &dev->flags) && count > 0)
                str_invert_revert(dev->rbuf, count);
 
        if (copy_to_user(buf, dev->rbuf, count))
-               return -EFAULT;
+               rc = -EFAULT;
 
 release_io:
        clear_bit(LOCK_IO, &dev->flags);
index 702dcc9..14a34d9 100644
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
                u.packet.header_length = GET_HEADER_LENGTH(control);
 
                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+                       if (u.packet.header_length % 4 != 0)
+                               return -EINVAL;
                        header_length = u.packet.header_length;
                } else {
                        /*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
-                       } else if (u.packet.header_length % ctx->header_size != 0) {
+                       } else if (u.packet.header_length == 0 ||
+                                  u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
                return -ENODEV;
 
        if (_IOC_TYPE(cmd) != '#' ||
-           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+           _IOC_SIZE(cmd) > sizeof(buffer))
                return -EINVAL;
 
-       if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) == _IOC_READ)
+               memset(&buffer, 0, _IOC_SIZE(cmd));
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE)
+               if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
-       }
 
        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;
 
-       if (_IOC_DIR(cmd) & _IOC_READ) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) & _IOC_READ)
+               if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
-       }
 
        return ret;
 }
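
The reworked dispatch_ioctl() above does the _IOC_SIZE bounds check once up front, zeroes the buffer for read-only commands so no stale kernel stack bytes travel back to userspace, and only then performs the copy-in/copy-out. A userspace sketch of that dispatch shape; plain memcpy() stands in for copy_from_user()/copy_to_user() and every other name is invented:

#include <stdio.h>
#include <string.h>

#define DIR_WRITE 1u	/* userspace -> handler, like _IOC_WRITE */
#define DIR_READ  2u	/* handler -> userspace, like _IOC_READ */

union arg_buffer { int value; char raw[64]; };

static int handle_get_answer(union arg_buffer *b)
{
	b->value = 42;
	return 0;
}

static int dispatch(unsigned int dir, size_t size, void *user)
{
	union arg_buffer buffer;
	int ret;

	if (size > sizeof(buffer))	/* one bounds check, up front */
		return -1;
	if (dir == DIR_READ)		/* read-only: zero it so nothing stale leaks */
		memset(&buffer, 0, size);
	if (dir & DIR_WRITE)
		memcpy(&buffer, user, size);	/* stands in for copy_from_user() */

	ret = handle_get_answer(&buffer);
	if (ret < 0)
		return ret;

	if (dir & DIR_READ)
		memcpy(user, &buffer, size);	/* stands in for copy_to_user() */
	return ret;
}

int main(void)
{
	int answer = 0;

	dispatch(DIR_READ, sizeof(answer), &answer);
	printf("%d\n", answer);
	return 0;
}
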
index 3784a47..8f5aebf 100644
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-                       break;
+                       return -EBUSY;
 
                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
        __be32 c, all, old;
-       int i, retry = 5;
+       int i, ret = -EIO, retry = 5;
 
        old = all = allocate ? cpu_to_be32(~0) : 0;
 
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                if (!(channels_mask & 1 << i))
                        continue;
 
+               ret = -EBUSY;
+
                c = cpu_to_be32(1 << (31 - i));
                if ((old & c) != (all & c))
                        continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 
                        /* 1394-1995 IRM, fall through to retry. */
                default:
-                       if (retry--)
+                       if (retry) {
+                               retry--;
                                i--;
+                       } else {
+                               ret = -EIO;
+                       }
                }
        }
 
-       return -EIO;
+       return ret;
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
index 0cf4d7f..94b16e0 100644
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
                              struct fw_packet *packet, u32 csr)
 {
        struct fw_packet response;
-       int tcode, length, ext_tcode, sel;
+       int tcode, length, ext_tcode, sel, try;
        __be32 *payload, lock_old;
        u32 lock_arg, lock_data;
 
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
        reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
        reg_write(ohci, OHCI1394_CSRControl, sel);
 
-       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-       else
-               fw_notify("swap not done yet\n");
+       for (try = 0; try < 20; try++)
+               if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+                       lock_old = cpu_to_be32(reg_read(ohci,
+                                                       OHCI1394_CSRData));
+                       fw_fill_response(&response, packet->header,
+                                        RCODE_COMPLETE,
+                                        &lock_old, sizeof(lock_old));
+                       goto out;
+               }
+
+       fw_error("swap not done (CSR lock timeout)\n");
+       fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
-       fw_fill_response(&response, packet->header,
-                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
  out:
        fw_core_handle_response(&ohci->card, &response);
 }
 
 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-       u64 offset;
-       u32 csr;
+       u64 offset, csr;
 
        if (ctx == &ctx->ohci->at_request_ctx) {
                packet->ack = ACK_PENDING;
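
Rather than reading OHCI1394_CSRControl once and merely logging "swap not done", the patch polls the completion bit up to 20 times and answers RCODE_BUSY on timeout. The shape of that bounded poll, shrunk to a toy whose fake reg_read() reports completion on the third read; DONE_BIT and the 42 payload are invented:

#include <stdio.h>

#define DONE_BIT 0x80000000u

static unsigned int reg_read(void)
{
	static int reads;

	/* Pretend hardware: the lock swap completes on the third read. */
	return ++reads >= 3 ? (DONE_BIT | 42u) : 0u;
}

static int wait_for_swap(unsigned int *old)
{
	int try;

	for (try = 0; try < 20; try++)	/* bounded retries, as in the patch */
		if (reg_read() & DONE_BIT) {
			*old = reg_read() & ~DONE_BIT;	/* latch the data */
			return 0;
		}
	return -1;	/* timed out: the caller answers "busy" */
}

int main(void)
{
	unsigned int old;

	if (wait_for_swap(&old) == 0)
		printf("old value %u\n", old);
	else
		printf("swap not done, busy\n");
	return 0;
}
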
index b743411..a0c365f 100644
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
        }
        driver = dev->driver;
 
-       drm_vblank_cleanup(dev);
-
        drm_lastclose(dev);
 
        if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
                dev->agp = NULL;
        }
 
+       drm_vblank_cleanup(dev);
+
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_rmmap(dev, r_list->map);
        drm_ht_remove(&dev->map_hash);
index b574503..a0b8447 100644
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
                } else {
                        struct drm_i915_gem_object *obj_priv;
 
-                       obj_priv = obj->driver_private;
+                       obj_priv = to_intel_bo(obj);
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
index 2dc9393..c3cfafc 100644
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
        dev_priv->cfb_size = size;
 
+       dev_priv->compressed_fb = compressed_fb;
+
        if (IS_GM45(dev)) {
                g4x_disable_fbc(dev);
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                i8xx_disable_fbc(dev);
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
+               dev_priv->compressed_llb = compressed_llb;
        }
 
        DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                  ll_base, size >> 20);
 }
 
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       drm_mm_put_block(dev_priv->compressed_fb);
+       if (!IS_GM45(dev))
+               drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
+               if (I915_HAS_FBC(dev) && i915_powersave)
+                       i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->vram);
                i915_gem_lastclose(dev);
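
i915_setup_compression() now stashes its drm_mm allocations in dev_priv so the new i915_cleanup_compression() can hand them back at unload, with the same GM45 distinction applied in both directions. A minimal sketch of that setup/teardown pairing; the dev_priv layout, has_llb flag and the malloc()/free() stand-ins are all invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_priv {
	bool has_llb;		/* plays the role of !IS_GM45(dev) */
	void *compressed_fb;
	void *compressed_llb;
};

static void setup_compression(struct dev_priv *d)
{
	d->compressed_fb = malloc(4096);	/* like drm_mm_get_block() */
	if (d->has_llb)
		d->compressed_llb = malloc(4096);
}

static void cleanup_compression(struct dev_priv *d)
{
	free(d->compressed_fb);			/* mirrors drm_mm_put_block() */
	if (d->has_llb)				/* same condition as setup */
		free(d->compressed_llb);
}

int main(void)
{
	struct dev_priv d = { .has_llb = true };

	setup_compression(&d);
	cleanup_compression(&d);
	puts("compression buffers returned at unload");
	return 0;
}
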
 
index 4b26919..cc03537 100644
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };
 
 const static struct intel_device_info intel_i85x_info = {
-       .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .cursor_needs_physical = 1,
 };
 
 const static struct intel_device_info intel_i865g_info = {
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = {
        .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i915gm_info = {
-       .is_i9xx = 1,  .is_mobile = 1, .has_fbc = 1,
+       .is_i9xx = 1,  .is_mobile = 1,
        .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945g_info = {
        .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945gm_info = {
-       .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+       .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-       INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+       INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
            !dev_priv->mm.suspended) {
                drm_i915_ring_buffer_t *ring = &dev_priv->ring;
                struct drm_gem_object *obj = ring->ring_obj;
-               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                dev_priv->mm.suspended = 0;
 
                /* Stop the ring if it's running. */
index aba8260..6e47900 100644
@@ -195,6 +195,7 @@ struct intel_overlay;
 struct intel_device_info {
        u8 is_mobile : 1;
        u8 is_i8xx : 1;
+       u8 is_i85x : 1;
        u8 is_i915g : 1;
        u8 is_i9xx : 1;
        u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
+       void *seqno_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
        unsigned int status_gfx_addr;
+       unsigned int seqno_gfx_addr;
        drm_local_map_t hws_map;
        struct drm_gem_object *hws_obj;
+       struct drm_gem_object *seqno_obj;
        struct drm_gem_object *pwrctx;
 
        struct resource mch_res;
@@ -611,6 +615,8 @@ typedef struct drm_i915_private {
        /* Reclocking support */
        bool render_reclock_avail;
        bool lvds_downclock_avail;
+       /* indicate whether the LVDS EDID is OK */
+       bool lvds_edid_good;
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
        struct work_struct idle_work;
@@ -628,6 +634,9 @@ typedef struct drm_i915_private {
        u8 max_delay;
 
        enum no_fbc_reason no_fbc_reason;
+
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -731,6 +740,8 @@ struct drm_i915_gem_object {
        atomic_t pending_flip;
 };
 
+#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+
 /**
  * Request queue structure.
  *
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
 #define IS_845G(dev)           ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev)           ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev)           (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||        \
                            IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
index 368d726..ef3d91d 100644
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages,
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Bounds check source.
         *
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret)
                goto fail;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
 
        while (remain > 0) {
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret)
                goto out_unpin_object;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
 
        while (remain > 0) {
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
 
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret != 0)
                goto fail_put_pages;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
 
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Bounds check destination.
         *
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
 #endif
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;
@@ -1305,7 +1305,7 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (dev->dev_mapping)
                unmap_mapping_range(dev->dev_mapping,
@@ -1316,7 +1316,7 @@ static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
 
@@ -1347,7 +1347,7 @@ static uint32_t
 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int start, i;
 
        /*
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size / PAGE_SIZE;
        int i;
 
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        BUG_ON(!obj_priv->active);
        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 static void
 i915_gem_object_truncate(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct inode *inode;
 
        inode = obj->filp->f_path.dentry->d_inode;
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        }
 }
 
+#define PIPE_CONTROL_FLUSH(addr)                                       \
+       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
+                PIPE_CONTROL_DEPTH_STALL);                             \
+       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
+       OUT_RING(0);                                                    \
+       OUT_RING(0);                                                    \
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(seqno);
+       if (HAS_PIPE_CONTROL(dev)) {
+               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
+               /*
+                * Workaround qword write incoherence by flushing the
+                * PIPE_NOTIFY buffers out to memory before requesting
+                * an interrupt.
+                */
+               BEGIN_LP_RING(32);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128; /* write to separate cachelines */
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                        PIPE_CONTROL_NOTIFY);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       } else {
+               BEGIN_LP_RING(4);
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(seqno);
+
+               OUT_RING(MI_USER_INTERRUPT);
+               ADVANCE_LP_RING();
+       }
 
        DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       if (HAS_PIPE_CONTROL(dev))
+               return ((volatile u32 *)(dev_priv->seqno_page))[0];
+       else
+               return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 }
 
 /**
@@ -1965,7 +2009,7 @@ static int
 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        /* This function only exists to support waiting for existing rendering,
@@ -1997,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
 
 #if WATCH_BUF
@@ -2173,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 #if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-                       obj_priv = obj->driver_private;
+                       obj_priv = to_intel_bo(obj);
                        BUG_ON(obj_priv->pin_count != 0);
                        BUG_ON(obj_priv->active);
 
@@ -2244,7 +2288,7 @@ int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
@@ -2297,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint64_t val;
 
@@ -2319,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint64_t val;
 
@@ -2339,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        int tile_width;
        uint32_t fence_reg, val;
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        pitch_val = obj_priv->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;
 
+       if (obj_priv->tiling_mode == I915_TILING_Y &&
+           HAS_128_BYTE_Y_TILING(dev))
+               WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+       else
+               WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2381,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        struct drm_gem_object *obj = reg->obj;
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int regnum = obj_priv->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
@@ -2425,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
                if (!reg->obj)
                        return i;
 
-               obj_priv = reg->obj->driver_private;
+               obj_priv = to_intel_bo(reg->obj);
                if (!obj_priv->pin_count)
                    avail++;
        }
@@ -2480,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg = NULL;
        int ret;
 
@@ -2547,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (IS_GEN6(dev)) {
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2583,7 +2633,7 @@ int
 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
@@ -2621,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_mm_node *free_space;
        gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
        int ret;
@@ -2728,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 void
 i915_gem_clflush_object(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
 
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
@@ -2829,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
@@ -2879,7 +2929,7 @@ int
 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
@@ -3092,7 +3142,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device               *dev = obj->dev;
-       struct drm_i915_gem_object      *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
        uint32_t                        old_read_domains;
@@ -3177,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (!obj_priv->page_cpu_valid)
                return;
@@ -3217,7 +3267,7 @@ static int
 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                          uint64_t offset, uint64_t size)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int i, ret;
 
@@ -3286,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int i, ret;
        void __iomem *reloc_page;
        bool need_fence;
@@ -3337,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                        i915_gem_object_unpin(obj);
                        return -EBADF;
                }
-               target_obj_priv = target_obj->driver_private;
+               target_obj_priv = to_intel_bo(target_obj);
 
 #if WATCH_RELOC
                DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3689,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
                prepare_to_wait(&dev_priv->pending_flip_queue,
                                &wait, TASK_INTERRUPTIBLE);
                for (i = 0; i < count; i++) {
-                       obj_priv = object_list[i]->driver_private;
+                       obj_priv = to_intel_bo(object_list[i]);
                        if (atomic_read(&obj_priv->pending_flip) > 0)
                                break;
                }
@@ -3798,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
                }
 
-               obj_priv = object_list[i]->driver_private;
+               obj_priv = to_intel_bo(object_list[i]);
                if (obj_priv->in_execbuffer) {
                        DRM_ERROR("Object %p appears more than once in object list\n",
                                   object_list[i]);
@@ -3924,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               struct drm_i915_gem_object *obj_priv = obj->driver_private;
+               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                uint32_t old_write_domain = obj->write_domain;
 
                obj->write_domain = obj->pending_write_domain;
@@ -3999,7 +4049,7 @@ err:
 
        for (i = 0; i < args->buffer_count; i++) {
                if (object_list[i]) {
-                       obj_priv = object_list[i]->driver_private;
+                       obj_priv = to_intel_bo(object_list[i]);
                        obj_priv->in_execbuffer = false;
                }
                drm_gem_object_unreference(object_list[i]);
@@ -4177,7 +4227,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4210,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
        obj_priv->pin_count--;
@@ -4250,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4307,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return -EBADF;
        }
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@ -4349,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         */
        i915_gem_retire_requests(dev);
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        /* Don't count being on the flushing list against the object being
         * done.  Otherwise, a buffer left on the flushing list but not getting
         * flushed (because nobody's flushing that domain) won't ever return
@@ -4395,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
@@ -4456,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        trace_i915_gem_object_destroy(obj);
 
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
        return 0;
 }
 
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       obj = drm_gem_object_alloc(dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate seqno page\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       obj_priv = to_intel_bo(obj);
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret)
+               goto err_unref;
+
+       dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+       dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+       if (dev_priv->seqno_page == NULL) {
+               ret = -ENOMEM;
+               goto err_unpin;
+       }
+
+       dev_priv->seqno_obj = obj;
+       memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return ret;
+}
+
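
i915_gem_init_pipe_control() above is a textbook kernel goto ladder: each setup step that fails jumps to a label that unwinds only the steps already completed, in reverse order. The same shape in a freestanding sketch, where malloc()/free() stand in for object alloc/pin/kmap and the -1/-2 error codes are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *seqno_obj;		/* kept alive on success, like dev_priv->seqno_obj */
static char *seqno_page;	/* like dev_priv->seqno_page */

static int init_pipe_control(void)
{
	int ret;

	seqno_obj = malloc(4096);	/* step 1: allocate the object */
	if (seqno_obj == NULL) {
		ret = -1;
		goto err;
	}
	seqno_page = malloc(4096);	/* step 2: map it */
	if (seqno_page == NULL) {
		ret = -2;
		goto err_unref;
	}
	memset(seqno_page, 0, 4096);
	return 0;			/* success: both resources stay live */

err_unref:			/* labels undo completed steps in reverse */
	free(seqno_obj);
	seqno_obj = NULL;
err:
	return ret;
}

int main(void)
{
	printf("init_pipe_control: %d\n", init_pipe_control());
	return 0;
}
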
 static int
 i915_gem_init_hws(struct drm_device *dev)
 {
@@ -4563,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
        obj = drm_gem_object_alloc(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
-               return ret;
+               goto err_unref;
        }
 
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_unpin;
        }
+
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       goto err_unpin;
+       }
+
        dev_priv->hws_obj = obj;
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
        DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
        return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = dev_priv->seqno_obj;
+       obj_priv = to_intel_bo(obj);
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->seqno_obj = NULL;
+
+       dev_priv->seqno_page = NULL;
 }
 
 static void
@@ -4609,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
                return;
 
        obj = dev_priv->hws_obj;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        dev_priv->hw_status_page = NULL;
 
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+
        /* Write high address into HWS_PGA when disabling. */
        I915_WRITE(HWS_PGA, 0x1ffff000);
 }
@@ -4643,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
                i915_gem_cleanup_hws(dev);
                return -ENOMEM;
        }
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
@@ -4936,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        int ret;
        int page_count;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
        if (!obj_priv->phys_obj)
                return;
 
@@ -4975,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;
 
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (obj_priv->phys_obj) {
                if (obj_priv->phys_obj->id == id)
@@ -5026,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        void *obj_addr;
        int ret;
        char __user *user_data;
index e602614..35507cf 100644
@@ -72,7 +72,7 @@ void
 i915_gem_dump_object(struct drm_gem_object *obj, int len,
                     const char *where, uint32_t mark)
 {
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page;
 
        DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
index c01c878..4bdccef 100644
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                 * reg, so don't bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (IS_I9XX(dev)) {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
-               /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
-                * instead of 4 (2KB) on 945s.
-                */
-               if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 20))
+       } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               if (stride > 8192)
                        return false;
-       } else {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-               if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 19))
-                       return false;
+               if (IS_GEN3(dev)) {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+                               return false;
+               } else {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+                               return false;
+               }
        }
 
        /* 965+ just needs multiples of tile width */
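
The rewritten i915_tiling_ok() above trades the pitch_val bit-twiddling for explicit per-generation checks: a shared 8 KB stride cap for gen2/gen3, then a maximum fence size that differs by one shift between the two. A sketch of that split with placeholder limits (the real I830_FENCE_MAX_SIZE_VAL is deliberately not reproduced here):

#include <stdbool.h>
#include <stdio.h>

enum chip_gen { GEN2, GEN3 };

static bool tiling_ok(enum chip_gen gen, unsigned int stride,
		      unsigned long size)
{
	if (stride > 8192)	/* shared stride cap, as in the patch */
		return false;
	if (gen == GEN3)
		return size <= (128ul << 20);	/* bigger fences on gen3 */
	return size <= (128ul << 19);		/* half that on gen2 */
}

int main(void)
{
	printf("%d %d\n",
	       tiling_ok(GEN3, 4096, 64ul << 20),	/* 1: fits */
	       tiling_ok(GEN2, 16384, 1ul << 20));	/* 0: stride too big */
	return 0;
}
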
@@ -240,7 +236,7 @@ bool
 i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (obj_priv->gtt_space == NULL)
                return true;
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
                drm_gem_object_unreference_unlocked(obj);
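The set_tiling ioctl above and get_tiling just below share the same preamble: drm_gem_object_lookup() resolves the userspace handle and takes a reference, a stale handle yields -EINVAL, and every subsequent failure path must drop that reference (the unreference_unlocked call visible above). A condensed sketch of the idiom, using only calls visible in these hunks:

    /* Sketch of the lookup/validate/unreference idiom above. */
    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
            return -EINVAL;              /* stale or bogus handle */
    obj_priv = to_intel_bo(obj);

    if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
            drm_gem_object_unreference_unlocked(obj);  /* drop lookup ref */
            return -EINVAL;
    }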
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EINVAL;
-       obj_priv = obj->driver_private;
+       obj_priv = to_intel_bo(obj);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size >> PAGE_SHIFT;
        int i;
 
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size >> PAGE_SHIFT;
        int i;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49c458b..2b8b969 100644
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
        if (mode_config->num_connector) {
                list_for_each_entry(connector, &mode_config->connector_list, head) {
-                       struct intel_output *intel_output = to_intel_output(connector);
+                       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
        
-                       if (intel_output->hot_plug)
-                               (*intel_output->hot_plug) (intel_output);
+                       if (intel_encoder->hot_plug)
+                               (*intel_encoder->hot_plug) (intel_encoder);
                }
        }
        /* Just fire off a uevent and let userspace tell us what to do */
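The rename from struct intel_output to struct intel_encoder is applied tree-wide in this series; the loop itself is unchanged, walking every connector and firing the encoder's optional hot_plug hook. A minimal sketch of that optional-callback pattern, with the member layout assumed rather than taken from the headers:

    /* Sketch of the pattern above: hot_plug is a function pointer an
     * encoder may leave NULL, so callers must test before invoking.
     * (*enc->hot_plug)(enc) and enc->hot_plug(enc) are equivalent.
     */
    struct intel_encoder {
            void (*hot_plug)(struct intel_encoder *encoder);
            /* ... other encoder state ... */
    };

    static void fire_hotplug(struct intel_encoder *enc)
    {
            if (enc->hot_plug)
                    enc->hot_plug(enc);
    }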
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                                READ_BREADCRUMB(dev_priv);
        }
 
-       if (gt_iir & GT_USER_INTERRUPT) {
+       if (gt_iir & GT_PIPE_NOTIFY) {
                u32 seqno = i915_get_gem_seqno(dev);
                dev_priv->mm.irq_gem_seqno = seqno;
                trace_i915_gem_request_complete(dev, seqno);
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
        if (src == NULL)
                return NULL;
 
-       src_priv = src->driver_private;
+       src_priv = to_intel_bo(src);
        if (src_priv->pages == NULL)
                return NULL;
 
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-       u32 render_mask = GT_USER_INTERRUPT;
+       u32 render_mask = GT_PIPE_NOTIFY;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;