Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
author    Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 27 Oct 2010 01:57:59 +0000 (18:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 27 Oct 2010 01:57:59 +0000 (18:57 -0700)
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, intel_overlay.c}: due to the
   new simplified stack-based kmap_atomic() interface (sketched below)
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups (sketched below).

25 files changed:
MAINTAINERS
drivers/char/agp/generic.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i830/i830_drv.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_i2c.h
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/video/Kconfig

diff --combined MAINTAINERS
@@@ -157,11 -157,9 +157,11 @@@ S:       Maintaine
  F:    drivers/net/r8169.c
  
  8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
 +M:    Greg Kroah-Hartman <gregkh@suse.de>
  L:    linux-serial@vger.kernel.org
  W:    http://serial.sourceforge.net
 -S:    Orphan
 +S:    Maintained
 +T:    quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
  F:    drivers/serial/8250*
  F:    include/linux/serial_8250.h
  
@@@ -243,6 -241,21 +243,6 @@@ F:        drivers/pnp/pnpacpi
  F:    include/linux/acpi.h
  F:    include/acpi/
  
 -ACPI BATTERY DRIVERS
 -M:    Alexey Starikovskiy <astarikovskiy@suse.de>
 -L:    linux-acpi@vger.kernel.org
 -W:    http://www.lesswatts.org/projects/acpi/
 -S:    Supported
 -F:    drivers/acpi/battery.c
 -F:    drivers/acpi/*sbs*
 -
 -ACPI EC DRIVER
 -M:    Alexey Starikovskiy <astarikovskiy@suse.de>
 -L:    linux-acpi@vger.kernel.org
 -W:    http://www.lesswatts.org/projects/acpi/
 -S:    Supported
 -F:    drivers/acpi/ec.c
 -
  ACPI FAN DRIVER
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
@@@ -642,7 -655,7 +642,7 @@@ ARM/FARADAY FA526 POR
  M:    Hans Ulli Kroll <ulli.kroll@googlemail.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -T:    git://git.berlios.de/gemini-board
 +T:    git git://git.berlios.de/gemini-board
  F:    arch/arm/mm/*-fa*
  
  ARM/FOOTBRIDGE ARCHITECTURE
@@@ -657,7 -670,7 +657,7 @@@ ARM/FREESCALE IMX / MXC ARM ARCHITECTUR
  M:    Sascha Hauer <kernel@pengutronix.de>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -T:    git://git.pengutronix.de/git/imx/linux-2.6.git
 +T:    git git://git.pengutronix.de/git/imx/linux-2.6.git
  F:    arch/arm/mach-mx*/
  F:    arch/arm/plat-mxc/
  
@@@ -695,7 -708,8 +695,7 @@@ ARM/INCOME PXA270 SUPPOR
  M:    Marek Vasut <marek.vasut@gmail.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -F:    arch/arm/mach-pxa/income.c
 -F:    arch/arm/mach-pxa/include/mach-pxa/income.h
 +F:    arch/arm/mach-pxa/colibri-pxa270-income.c
  
  ARM/INTEL IOP32X ARM ARCHITECTURE
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -742,7 -756,13 +742,7 @@@ L:        linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    arch/arm/mach-ixp4xx/
  
 -ARM/INTEL RESEARCH IMOTE 2 MACHINE SUPPORT
 -M:    Jonathan Cameron <jic23@cam.ac.uk>
 -L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -S:    Maintained
 -F:    arch/arm/mach-pxa/imote2.c
 -
 -ARM/INTEL RESEARCH STARGATE 2 MACHINE SUPPORT
 +ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
  M:    Jonathan Cameron <jic23@cam.ac.uk>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -907,20 -927,40 +907,20 @@@ W:      http://www.fluff.org/ben/linux
  S:    Maintained
  F:    arch/arm/mach-s3c2410/
  
 -ARM/S3C2440 ARM ARCHITECTURE
 +ARM/S3C244x ARM ARCHITECTURE
  M:    Ben Dooks <ben-linux@fluff.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  W:    http://www.fluff.org/ben/linux/
  S:    Maintained
  F:    arch/arm/mach-s3c2440/
 -
 -ARM/S3C2442 ARM ARCHITECTURE
 -M:    Ben Dooks <ben-linux@fluff.org>
 -L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -W:    http://www.fluff.org/ben/linux/
 -S:    Maintained
 -F:    arch/arm/mach-s3c2442/
 -
 -ARM/S3C2443 ARM ARCHITECTURE
 -M:    Ben Dooks <ben-linux@fluff.org>
 -L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -W:    http://www.fluff.org/ben/linux/
 -S:    Maintained
  F:    arch/arm/mach-s3c2443/
  
 -ARM/S3C6400 ARM ARCHITECTURE
 -M:    Ben Dooks <ben-linux@fluff.org>
 -L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -W:    http://www.fluff.org/ben/linux/
 -S:    Maintained
 -F:    arch/arm/mach-s3c6400/
 -
 -ARM/S3C6410 ARM ARCHITECTURE
 +ARM/S3C64xx ARM ARCHITECTURE
  M:    Ben Dooks <ben-linux@fluff.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  W:    http://www.fluff.org/ben/linux/
  S:    Maintained
 -F:    arch/arm/mach-s3c6410/
 +F:    arch/arm/mach-s3c64xx/
  
  ARM/S5P ARM ARCHITECTURES
  M:    Kukjin Kim <kgene.kim@samsung.com>
@@@ -950,23 -990,11 +950,23 @@@ S:      Supporte
  F:    arch/arm/mach-shmobile/
  F:    drivers/sh/
  
 +ARM/TELECHIPS ARM ARCHITECTURE
 +M:    "Hans J. Koch" <hjk@linutronix.de>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    arch/arm/plat-tcc/
 +F:    arch/arm/mach-tcc8k/
 +
  ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  
 +ARM/TETON BGA MACHINE SUPPORT
 +M:    Mark F. Brown <mark.brown314@gmail.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +
  ARM/THECUS N2100 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1109,13 -1137,6 +1109,13 @@@ W:    http://wireless.kernel.org/en/users/
  S:    Maintained
  F:    drivers/net/wireless/ath/ar9170/
  
 +CARL9170 LINUX COMMUNITY WIRELESS DRIVER
 +M:    Christian Lamparter <chunkeey@googlemail.com>
 +L:    linux-wireless@vger.kernel.org
 +W:    http://wireless.kernel.org/en/users/Drivers/carl9170
 +S:    Maintained
 +F:    drivers/net/wireless/ath/carl9170/
 +
  ATK0110 HWMON DRIVER
  M:    Luca Tettamanti <kronos.it@gmail.com>
  L:    lm-sensors@lm-sensors.org
@@@ -1340,19 -1361,16 +1340,19 @@@ F:   drivers/mtd/devices/block2mtd.
  
  BLUETOOTH DRIVERS
  M:    Marcel Holtmann <marcel@holtmann.org>
 +M:    Gustavo F. Padovan <padovan@profusion.mobi>
  L:    linux-bluetooth@vger.kernel.org
  W:    http://www.bluez.org/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
  S:    Maintained
  F:    drivers/bluetooth/
  
  BLUETOOTH SUBSYSTEM
  M:    Marcel Holtmann <marcel@holtmann.org>
 +M:    Gustavo F. Padovan <padovan@profusion.mobi>
  L:    linux-bluetooth@vger.kernel.org
  W:    http://www.bluez.org/
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
  S:    Maintained
  F:    net/bluetooth/
  F:    include/net/bluetooth/
@@@ -1397,13 -1415,6 +1397,13 @@@ L:    linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/bfa/
  
 +BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 +M:    Rasesh Mody <rmody@brocade.com>
 +M:    Debashis Dutt <ddutt@brocade.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/bna/
 +
  BSG (block layer generic sg v4 driver)
  M:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
  L:    linux-scsi@vger.kernel.org
@@@ -1516,12 -1527,11 +1516,12 @@@ T:   git git://git.kernel.org/pub/scm/lin
  S:    Supported
  F:    Documentation/filesystems/ceph.txt
  F:    fs/ceph
 +F:    net/ceph
 +F:    include/linux/ceph
  
  CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 -M:    David Vrabel <david.vrabel@csr.com>
  L:    linux-usb@vger.kernel.org
 -S:    Supported
 +S:    Orphan
  F:    Documentation/usb/WUSB-Design-overview.txt
  F:    Documentation/usb/wusb-cbaf
  F:    drivers/usb/host/hwa-hc.c
@@@ -1560,9 -1570,9 +1560,9 @@@ S:      Supporte
  F:    scripts/checkpatch.pl
  
  CISCO VIC ETHERNET NIC DRIVER
 -M:    Scott Feldman <scofeldm@cisco.com>
  M:    Vasanthy Kolluri <vkolluri@cisco.com>
  M:    Roopa Prabhu <roprabhu@cisco.com>
 +M:    David Wang <dwang2@cisco.com>
  S:    Supported
  F:    drivers/net/enic/
  
@@@ -2040,16 -2050,14 +2040,16 @@@ F:   drivers/block/drbd
  F:    lib/lru_cache.c
  F:    Documentation/blockdev/drbd/
  
 -DRIVER CORE, KOBJECTS, AND SYSFS
 +DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:    Greg Kroah-Hartman <gregkh@suse.de>
  T:    quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
  S:    Supported
  F:    Documentation/kobject.txt
  F:    drivers/base/
  F:    fs/sysfs/
 +F:    fs/debugfs/
  F:    include/linux/kobj*
 +F:    include/linux/debugfs.h
  F:    lib/kobj*
  
  DRM DRIVERS
@@@ -2060,6 -2068,15 +2060,15 @@@ S:    Maintaine
  F:    drivers/gpu/drm/
  F:    include/drm/
  
+ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+ M:    Chris Wilson <chris@chris-wilson.co.uk>
+ L:    intel-gfx@lists.freedesktop.org
+ L:    dri-devel@lists.freedesktop.org
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+ S:    Supported
+ F:    drivers/gpu/drm/i915
+ F:    include/drm/i915*
+ 
  DSCC4 DRIVER
  M:    Francois Romieu <romieu@fr.zoreil.com>
  L:    netdev@vger.kernel.org
@@@ -2169,13 -2186,6 +2178,13 @@@ W:    bluesmoke.sourceforge.ne
  S:    Maintained
  F:    drivers/edac/i5400_edac.c
  
 +EDAC-I7300
 +M:    Mauro Carvalho Chehab <mchehab@redhat.com>
 +L:    linux-edac@vger.kernel.org
 +W:    bluesmoke.sourceforge.net
 +S:    Maintained
 +F:    drivers/edac/i7300_edac.c
 +
  EDAC-I7CORE
  M:    Mauro Carvalho Chehab <mchehab@redhat.com>
  L:    linux-edac@vger.kernel.org
@@@ -2603,10 -2613,10 +2612,10 @@@ F:   drivers/net/greth
  
  HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
  M:    Frank Seidel <frank@f-seidel.de>
 -L:    lm-sensors@lm-sensors.org
 +L:    platform-driver-x86@vger.kernel.org
  W:    http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
  S:    Maintained
 -F:    drivers/hwmon/hdaps.c
 +F:    drivers/platform/x86/hdaps.c
  
  HWPOISON MEMORY FAILURE HANDLING
  M:    Andi Kleen <andi@firstfloor.org>
@@@ -2903,12 -2913,6 +2912,12 @@@ M:    Brian King <brking@us.ibm.com
  S:    Supported
  F:    drivers/scsi/ipr.*
  
 +IBM Power Virtual Ethernet Device Driver
 +M:    Santiago Leon <santil@linux.vnet.ibm.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ibmveth.*
 +
  IBM ServeRAID RAID DRIVER
  P:    Jack Hammer
  M:    Dave Jeffery <ipslinux@adaptec.com>
@@@ -2973,7 -2977,7 +2982,7 @@@ M:      Roland Dreier <rolandd@cisco.com
  M:    Sean Hefty <sean.hefty@intel.com>
  M:    Hal Rosenstock <hal.rosenstock@gmail.com>
  L:    linux-rdma@vger.kernel.org
 -W:    http://www.openib.org/
 +W:    http://www.openfabrics.org/
  Q:    http://patchwork.kernel.org/project/linux-rdma/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
  S:    Supported
@@@ -3167,7 -3171,7 +3176,7 @@@ F:      drivers/net/ioc3-eth.
  
  IOC3 SERIAL DRIVER
  M:    Pat Gefre <pfg@sgi.com>
 -L:    linux-mips@linux-mips.org
 +L:    linux-serial@vger.kernel.org
  S:    Maintained
  F:    drivers/serial/ioc3_serial.c
  
@@@ -3244,12 -3248,6 +3253,12 @@@ F:    drivers/net/irda
  F:    include/net/irda/
  F:    net/irda/
  
 +IRQ SUBSYSTEM
 +M:    Thomas Gleixner <tglx@linutronix.de>
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
 +F:    kernel/irq/
 +
  ISAPNP
  M:    Jaroslav Kysela <perex@perex.cz>
  S:    Maintained
@@@ -3342,12 -3340,6 +3351,12 @@@ F:    fs/jbd*
  F:    include/linux/ext*jbd*.h
  F:    include/linux/jbd*.h
  
 +JSM Neo PCI based serial card
 +M:    Breno Leitao <leitao@linux.vnet.ibm.com>
 +L:    linux-serial@vger.kernel.org
 +S:    Maintained
 +F:    drivers/serial/jsm/
 +
  K8TEMP HARDWARE MONITORING DRIVER
  M:    Rudolf Marek <r.marek@assembler.cz>
  L:    lm-sensors@lm-sensors.org
@@@ -3398,7 -3390,7 +3407,7 @@@ F:      scripts/package
  
  KERNEL JANITORS
  L:    kernel-janitors@vger.kernel.org
 -W:    http://janitor.kernelnewbies.org/
 +W:    http://kernelnewbies.org/KernelJanitors
  S:    Odd Fixes
  
  KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
@@@ -3723,13 -3715,6 +3732,13 @@@ L:    linux-scsi@vger.kernel.or
  S:    Maintained
  F:    drivers/scsi/sym53c8xx_2/
  
 +LTC4261 HARDWARE MONITOR DRIVER
 +M:    Guenter Roeck <linux@roeck-us.net>
 +L:    lm-sensors@lm-sensors.org
 +S:    Maintained
 +F:    Documentation/hwmon/ltc4261
 +F:    drivers/hwmon/ltc4261.c
 +
  LTP (Linux Test Project)
  M:    Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
  M:    Garrett Cooper <yanegomi@gmail.com>
@@@ -3825,7 -3810,7 +3834,7 @@@ F:      drivers/net/wireless/mwl8k.
  MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
  M:    Nicolas Pitre <nico@fluxnic.net>
  S:    Odd Fixes
 -F: drivers/mmc/host/mvsdio.*
 +F:    drivers/mmc/host/mvsdio.*
  
  MARVELL YUKON / SYSKONNECT DRIVER
  M:    Mirko Lindner <mlindner@syskonnect.de>
@@@ -4386,12 -4371,13 +4395,12 @@@ F:   Documentation/filesystems/dlmfs.tx
  F:    fs/ocfs2/
  
  ORINOCO DRIVER
 -M:    Pavel Roskin <proski@gnu.org>
 -M:    David Gibson <hermes@gibson.dropbear.id.au>
  L:    linux-wireless@vger.kernel.org
  L:    orinoco-users@lists.sourceforge.net
  L:    orinoco-devel@lists.sourceforge.net
 +W:    http://linuxwireless.org/en/users/Drivers/orinoco
  W:    http://www.nongnu.org/orinoco/
 -S:    Maintained
 +S:    Orphan
  F:    drivers/net/wireless/orinoco/
  
  OSD LIBRARY and FILESYSTEM
@@@ -4424,15 -4410,6 +4433,15 @@@ L:    linux-i2c@vger.kernel.or
  S:    Maintained
  F:    drivers/i2c/busses/i2c-pasemi.c
  
 +PADATA PARALLEL EXECUTION MECHANISM
 +M:    Steffen Klassert <steffen.klassert@secunet.com>
 +L:    linux-kernel@vger.kernel.org
 +L:    linux-crypto@vger.kernel.org
 +S:    Maintained
 +F:    kernel/padata.c
 +F:    include/linux/padata.h
 +F:    Documentation/padata.txt
 +
  PANASONIC LAPTOP ACPI EXTRAS DRIVER
  M:    Harald Welte <laforge@gnumonks.org>
  L:    platform-driver-x86@vger.kernel.org
@@@ -4512,12 -4489,6 +4521,12 @@@ S:    Maintaine
  F:    drivers/leds/leds-pca9532.c
  F:    include/linux/leds-pca9532.h
  
 +PCA9541 I2C BUS MASTER SELECTOR DRIVER
 +M:    Guenter Roeck <guenter.roeck@ericsson.com>
 +L:    linux-i2c@vger.kernel.org
 +S:    Maintained
 +F:    drivers/i2c/muxes/pca9541.c
 +
  PCA9564/PCA9665 I2C BUS DRIVER
  M:    Wolfram Sang <w.sang@pengutronix.de>
  L:    linux-i2c@vger.kernel.org
@@@ -4566,13 -4537,6 +4575,13 @@@ L:    netdev@vger.kernel.or
  S:    Maintained
  F:    drivers/net/pcnet32.c
  
 +PCRYPT PARALLEL CRYPTO ENGINE
 +M:    Steffen Klassert <steffen.klassert@secunet.com>
 +L:    linux-crypto@vger.kernel.org
 +S:    Maintained
 +F:    crypto/pcrypt.c
 +F:    include/crypto/pcrypt.h
 +
  PER-TASK DELAY ACCOUNTING
  M:    Balbir Singh <balbir@linux.vnet.ibm.com>
  S:    Maintained
@@@ -4601,14 -4565,6 +4610,14 @@@ L:    linux-abi-devel@lists.sourceforge.ne
  S:    Maintained
  F:    include/linux/personality.h
  
 +PHONET PROTOCOL
 +M:    Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 +S:    Supported
 +F:    Documentation/networking/phonet.txt
 +F:    include/linux/phonet.h
 +F:    include/net/phonet/
 +F:    net/phonet/
 +
  PHRAM MTD DRIVER
  M:    Joern Engel <joern@lazybastard.org>
  L:    linux-mtd@lists.infradead.org
@@@ -4858,15 -4814,6 +4867,15 @@@ F:    fs/qnx4
  F:    include/linux/qnx4_fs.h
  F:    include/linux/qnxtypes.h
  
 +RADOS BLOCK DEVICE (RBD)
 +M:    Yehuda Sadeh <yehuda@hq.newdream.net>
 +M:    Sage Weil <sage@newdream.net>
 +L:    ceph-devel@vger.kernel.org
 +S:    Supported
 +F:    drivers/block/rbd.c
 +F:    drivers/block/rbd_types.h
 +
  RADEON FRAMEBUFFER DISPLAY DRIVER
  M:    Benjamin Herrenschmidt <benh@kernel.crashing.org>
  L:    linux-fbdev@vger.kernel.org
@@@ -4916,7 -4863,7 +4925,7 @@@ RCUTORTURE MODUL
  M:    Josh Triplett <josh@freedesktop.org>
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
  S:    Supported
 -T:    git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
  F:    Documentation/RCU/torture.txt
  F:    kernel/rcutorture.c
  
@@@ -4941,7 -4888,7 +4950,7 @@@ M:      Dipankar Sarma <dipankar@in.ibm.com
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
  W:    http://www.rdrop.com/users/paulmck/rclock/
  S:    Supported
 -T:    git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
  F:    Documentation/RCU/
  F:    include/linux/rcu*
  F:    include/linux/srcu*
@@@ -5131,16 -5078,6 +5140,16 @@@ W:    http://www.kernel.d
  S:    Maintained
  F:    drivers/scsi/sr*
  
 +SCSI RDMA PROTOCOL (SRP) INITIATOR
 +M:    David Dillow <dillowda@ornl.gov>
 +L:    linux-rdma@vger.kernel.org
 +S:    Supported
 +W:    http://www.openfabrics.org
 +Q:    http://patchwork.kernel.org/project/linux-rdma/list/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
 +F:    drivers/infiniband/ulp/srp/
 +F:    include/scsi/srp.h
 +
  SCSI SG DRIVER
  M:    Doug Gilbert <dgilbert@interlog.com>
  L:    linux-scsi@vger.kernel.org
@@@ -5363,8 -5300,8 +5372,8 @@@ F:      drivers/*/*s3c2410
  F:    drivers/*/*/*s3c2410*
  
  TI DAVINCI MACHINE SUPPORT
 -P:    Kevin Hilman
 -M:    davinci-linux-open-source@linux.davincidsp.com
 +M:    Kevin Hilman <khilman@deeprootsystems.com>
 +L:    davinci-linux-open-source@linux.davincidsp.com (subscribers-only)
  Q:    http://patchwork.kernel.org/project/linux-davinci/list/
  S:    Supported
  F:    arch/arm/mach-davinci
@@@ -5965,9 -5902,13 +5974,9 @@@ F:     Documentation/filesystems/ufs.tx
  F:    fs/ufs/
  
  ULTRA-WIDEBAND (UWB) SUBSYSTEM:
 -M:    David Vrabel <david.vrabel@csr.com>
  L:    linux-usb@vger.kernel.org
 -S:    Supported
 +S:    Orphan
  F:    drivers/uwb/
 -X:    drivers/uwb/wlp/
 -X:    drivers/uwb/i1480/i1480u-wlp/
 -X:    drivers/uwb/i1480/i1480-wlp.h
  F:    include/linux/uwb.h
  F:    include/linux/uwb/
  
@@@ -6002,14 -5943,6 +6011,14 @@@ S:    Maintaine
  F:    Documentation/usb/acm.txt
  F:    drivers/usb/class/cdc-acm.*
  
 +USB ATTACHED SCSI
 +M:    Matthew Wilcox <willy@linux.intel.com>
 +M:    Sarah Sharp <sarah.a.sharp@linux.intel.com>
 +L:    linux-usb@vger.kernel.org
 +L:    linux-scsi@vger.kernel.org
 +S:    Supported
 +F:    drivers/usb/storage/uas.c
 +
  USB BLOCK DRIVER (UB ub)
  M:    Pete Zaitcev <zaitcev@redhat.com>
  L:    linux-usb@vger.kernel.org
@@@ -6109,6 -6042,13 +6118,6 @@@ L:     linux-usb@vger.kernel.or
  S:    Maintained
  F:    drivers/usb/serial/option.c
  
 -USB OV511 DRIVER
 -M:    Mark McClelland <mmcclell@bigfoot.com>
 -L:    linux-usb@vger.kernel.org
 -W:    http://alpha.dyndns.org/ov511/
 -S:    Maintained
 -F:    drivers/media/video/ov511.*
 -
  USB PEGASUS DRIVER
  M:    Petko Manolov <petkan@users.sourceforge.net>
  L:    linux-usb@vger.kernel.org
@@@ -6269,6 -6209,16 +6278,6 @@@ S:     Supporte
  F:    drivers/usb/host/xhci*
  F:    drivers/usb/host/pci-quirks*
  
 -USB ZC0301 DRIVER
 -M:    Luca Risolia <luca.risolia@studio.unibo.it>
 -L:    linux-usb@vger.kernel.org
 -L:    linux-media@vger.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 -W:    http://www.linux-projects.org
 -S:    Maintained
 -F:    Documentation/video4linux/zc0301.txt
 -F:    drivers/media/video/zc0301/
 -
  USB ZD1201 DRIVER
  L:    linux-wireless@vger.kernel.org
  W:    http://linux-lc100020.sourceforge.net
@@@ -6486,27 -6436,36 +6495,27 @@@ F:   include/linux/wimax/debug.
  F:    include/net/wimax.h
  F:    net/wimax/
  
 -WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
 -M:    David Vrabel <david.vrabel@csr.com>
 -L:    netdev@vger.kernel.org
 -S:    Maintained
 -F:    include/linux/wlp.h
 -F:    drivers/uwb/wlp/
 -F:    drivers/uwb/i1480/i1480u-wlp/
 -F:    drivers/uwb/i1480/i1480-wlp.h
 -
  WISTRON LAPTOP BUTTON DRIVER
  M:    Miloslav Trmac <mitr@volny.cz>
  S:    Maintained
  F:    drivers/input/misc/wistron_btns.c
  
  WL1251 WIRELESS DRIVER
 -M:    Kalle Valo <kalle.valo@iki.fi>
 +M:    Kalle Valo <kvalo@adurom.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
 -F:    drivers/net/wireless/wl12xx/*
 -X:    drivers/net/wireless/wl12xx/wl1271*
 +F:    drivers/net/wireless/wl1251/*
  
  WL1271 WIRELESS DRIVER
  M:    Luciano Coelho <luciano.coelho@nokia.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
  S:    Maintained
  F:    drivers/net/wireless/wl12xx/wl1271*
 +F:    include/linux/wl12xx.h
  
  WL3501 WIRELESS PCMCIA CARD DRIVER
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@@ -6653,20 -6612,6 +6662,20 @@@ M:    "Maciej W. Rozycki" <macro@linux-mip
  S:    Maintained
  F:    drivers/serial/zs.*
  
 +GRE DEMULTIPLEXER DRIVER
 +M:    Dmitry Kozlov <xeb@mail.ru>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    net/ipv4/gre.c
 +F:    include/net/gre.h
 +
 +PPTP DRIVER
 +M:    Dmitry Kozlov <xeb@mail.ru>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/pptp.c
 +W:    http://sourceforge.net/projects/accel-pptp
 +
  THE REST
  M:    Linus Torvalds <torvalds@linux-foundation.org>
  L:    linux-kernel@vger.kernel.org
diff --combined drivers/char/agp/generic.c
@@@ -437,11 -437,6 +437,6 @@@ int agp_bind_memory(struct agp_memory *
                curr->is_flushed = true;
        }
  
-       if (curr->bridge->driver->agp_map_memory) {
-               ret_val = curr->bridge->driver->agp_map_memory(curr);
-               if (ret_val)
-                       return ret_val;
-       }
        ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
  
        if (ret_val != 0)
@@@ -483,9 -478,6 +478,6 @@@ int agp_unbind_memory(struct agp_memor
        if (ret_val != 0)
                return ret_val;
  
-       if (curr->bridge->driver->agp_unmap_memory)
-               curr->bridge->driver->agp_unmap_memory(curr);
        curr->is_bound = false;
        curr->pg_start = 0;
        spin_lock(&curr->bridge->mapped_lock);
@@@ -984,9 -976,7 +976,9 @@@ int agp_generic_create_gatt_table(struc
  
        bridge->driver->cache_flush();
  #ifdef CONFIG_X86
 -      set_memory_uc((unsigned long)table, 1 << page_order);
 +      if (set_memory_uc((unsigned long)table, 1 << page_order))
 +              printk(KERN_WARNING "Could not set GATT table memory to UC!");
 +
        bridge->gatt_table = (void *)table;
  #else
        bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
diff --combined drivers/gpu/drm/drm_drv.c
@@@ -91,8 -91,8 +91,8 @@@ static struct drm_ioctl_desc drm_ioctls
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
  
-       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  
        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
  
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
  
-       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@@ -180,10 -180,6 +180,6 @@@ int drm_lastclose(struct drm_device * d
  
        mutex_lock(&dev->struct_mutex);
  
-       /* Free drawable information memory */
-       drm_drawable_free_all(dev);
-       del_timer(&dev->timer);
        /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp &&
                        !drm_core_check_feature(dev, DRIVER_MODESET)) {
@@@ -284,8 -280,7 +280,8 @@@ EXPORT_SYMBOL(drm_exit)
  /** File operations structure */
  static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
 -      .open = drm_stub_open
 +      .open = drm_stub_open,
 +      .llseek = noop_llseek,
  };
  
  static int __init drm_core_init(void)
diff --combined drivers/gpu/drm/i810/i810_drv.c
@@@ -52,8 -52,6 +52,6 @@@ static struct drm_driver driver = 
        .device_is_agp = i810_driver_device_is_agp,
        .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = i810_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@@ -63,7 -61,6 +61,7 @@@
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .llseek = noop_llseek,
        },
  
        .pci_driver = {
diff --combined drivers/gpu/drm/i830/i830_drv.c
@@@ -57,8 -57,6 +57,6 @@@ static struct drm_driver driver = 
        .device_is_agp = i830_driver_device_is_agp,
        .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
        .dma_quiescent = i830_driver_dma_quiescent,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
  #if USE_IRQS
        .irq_preinstall = i830_driver_irq_preinstall,
        .irq_postinstall = i830_driver_irq_postinstall,
@@@ -74,7 -72,6 +72,7 @@@
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .llseek = noop_llseek,
        },
  
        .pci_driver = {
  
diff --combined drivers/gpu/drm/i915/i915_debugfs.c
  #if defined(CONFIG_DEBUG_FS)
  
- #define ACTIVE_LIST   1
- #define FLUSHING_LIST 2
- #define INACTIVE_LIST 3
+ enum {
+       ACTIVE_LIST,
+       FLUSHING_LIST,
+       INACTIVE_LIST,
+       PINNED_LIST,
+       DEFERRED_FREE_LIST,
+ };
+ static const char *yesno(int v)
+ {
+       return v ? "yes" : "no";
+ }
+ static int i915_capabilities(struct seq_file *m, void *data)
+ {
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       const struct intel_device_info *info = INTEL_INFO(dev);
+       seq_printf(m, "gen: %d\n", info->gen);
+ #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+       B(is_mobile);
+       B(is_i85x);
+       B(is_i915g);
+       B(is_i945gm);
+       B(is_g33);
+       B(need_gfx_hws);
+       B(is_g4x);
+       B(is_pineview);
+       B(is_broadwater);
+       B(is_crestline);
+       B(has_fbc);
+       B(has_rc6);
+       B(has_pipe_cxsr);
+       B(has_hotplug);
+       B(cursor_needs_physical);
+       B(has_overlay);
+       B(overlay_needs_physical);
+       B(supports_tv);
+       B(has_bsd_ring);
+       B(has_blt_ring);
+ #undef B
+       return 0;
+ }
  
  static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
  {
@@@ -64,6 -106,29 +106,29 @@@ static const char *get_tiling_flag(stru
      }
  }
  
+ static void
+ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+ {
+       seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+                  &obj->base,
+                  get_pin_flag(obj),
+                  get_tiling_flag(obj),
+                  obj->base.size,
+                  obj->base.read_domains,
+                  obj->base.write_domain,
+                  obj->last_rendering_seqno,
+                  obj->dirty ? " dirty" : "",
+                  obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+       if (obj->base.name)
+               seq_printf(m, " (name: %d)", obj->base.name);
+       if (obj->fence_reg != I915_FENCE_REG_NONE)
+               seq_printf(m, " (fence: %d)", obj->fence_reg);
+       if (obj->gtt_space != NULL)
+               seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+       if (obj->ring != NULL)
+               seq_printf(m, " (%s)", obj->ring->name);
+ }
  static int i915_gem_object_list_info(struct seq_file *m, void *data)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
-       spinlock_t *lock = NULL;
+       size_t total_obj_size, total_gtt_size;
+       int count, ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
-               lock = &dev_priv->mm.active_list_lock;
-               head = &dev_priv->render_ring.active_list;
+               head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
+       case PINNED_LIST:
+               seq_printf(m, "Pinned:\n");
+               head = &dev_priv->mm.pinned_list;
+               break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
+       case DEFERRED_FREE_LIST:
+               seq_printf(m, "Deferred free:\n");
+               head = &dev_priv->mm.deferred_free_list;
+               break;
        default:
-               DRM_INFO("Ooops, unexpected list\n");
-               return 0;
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
        }
  
-       if (lock)
-               spin_lock(lock);
-       list_for_each_entry(obj_priv, head, list)
-       {
-               seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-                          &obj_priv->base,
-                          get_pin_flag(obj_priv),
-                          obj_priv->base.size,
-                          obj_priv->base.read_domains,
-                          obj_priv->base.write_domain,
-                          obj_priv->last_rendering_seqno,
-                          obj_priv->dirty ? " dirty" : "",
-                          obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-               if (obj_priv->base.name)
-                       seq_printf(m, " (name: %d)", obj_priv->base.name);
-               if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-                       seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-               if (obj_priv->gtt_space != NULL)
-                       seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
+       total_obj_size = total_gtt_size = count = 0;
+       list_for_each_entry(obj_priv, head, mm_list) {
+               seq_printf(m, "   ");
+               describe_obj(m, obj_priv);
                seq_printf(m, "\n");
+               total_obj_size += obj_priv->base.size;
+               total_gtt_size += obj_priv->gtt_space->size;
+               count++;
        }
+       mutex_unlock(&dev->struct_mutex);
  
-       if (lock)
-           spin_unlock(lock);
+       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+                  count, total_obj_size, total_gtt_size);
        return 0;
  }
  
+ static int i915_gem_object_info(struct seq_file *m, void* data)
+ {
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+       seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+       seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+       seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+       seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+       seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+       seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
  static int i915_gem_pageflip_info(struct seq_file *m, void *data)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@@ -176,6 -265,11 +265,11 @@@ static int i915_gem_request_info(struc
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
+       mutex_unlock(&dev->struct_mutex);
        return 0;
  }
  
@@@ -192,16 -288,24 +288,24 @@@ static int i915_gem_seqno_info(struct s
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
-                          i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+                          dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
                        dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+       mutex_unlock(&dev->struct_mutex);
        return 0;
  }
  
@@@ -211,6 -315,11 +315,11 @@@ static int i915_interrupt_info(struct s
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
-                          i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+                          dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
+       mutex_unlock(&dev->struct_mutex);
        return 0;
  }
  
@@@ -263,7 -374,11 +374,11 @@@ static int i915_gem_fence_regs_info(str
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
                        seq_printf(m, "\n");
                }
        }
+       mutex_unlock(&dev->struct_mutex);
  
        return 0;
  }
@@@ -313,16 -429,19 +429,19 @@@ static int i915_hws_info(struct seq_fil
        return 0;
  }
  
- static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+ static void i915_dump_object(struct seq_file *m,
+                            struct io_mapping *mapping,
+                            struct drm_i915_gem_object *obj_priv)
  {
-       int page, i;
-       uint32_t *mem;
+       int page, page_count, i;
  
+       page_count = obj_priv->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
-               mem = kmap_atomic(pages[page], KM_USER0);
+               u32 *mem = io_mapping_map_wc(mapping,
+                                            obj_priv->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-               kunmap_atomic(mem, KM_USER0);
+               io_mapping_unmap(mem);
        }
  }
  
@@@ -335,27 -454,20 +454,20 @@@ static int i915_batchbuffer_info(struc
        struct drm_i915_gem_object *obj_priv;
        int ret;
  
-       spin_lock(&dev_priv->mm.active_list_lock);
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
-       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-                       list) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-                   ret = i915_gem_object_get_pages(obj, 0);
-                   if (ret) {
-                           DRM_ERROR("Failed to get pages: %d\n", ret);
-                           spin_unlock(&dev_priv->mm.active_list_lock);
-                           return ret;
-                   }
-                   seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-                   i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-                   i915_gem_object_put_pages(obj);
+                   seq_printf(m, "--- gtt_offset = 0x%08x\n",
+                              obj_priv->gtt_offset);
+                   i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
                }
        }
  
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       mutex_unlock(&dev->struct_mutex);
  
        return 0;
  }
@@@ -365,20 -477,24 +477,24 @@@ static int i915_ringbuffer_data(struct 
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u8 *virt;
-       uint32_t *ptr, off;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
-               return 0;
-       }
-       virt = dev_priv->render_ring.virtual_start;
+       } else {
+               u8 *virt = dev_priv->render_ring.virtual_start;
+               uint32_t off;
  
-       for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-               ptr = (uint32_t *)(virt + off);
-               seq_printf(m, "%08x :  %08x\n", off, *ptr);
+               for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+                       uint32_t *ptr = (uint32_t *)(virt + off);
+                       seq_printf(m, "%08x :  %08x\n", off, *ptr);
+               }
        }
+       mutex_unlock(&dev->struct_mutex);
  
        return 0;
  }
@@@ -396,7 -512,7 +512,7 @@@ static int i915_ringbuffer_info(struct 
        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
-       seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+       seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
  
        return 0;
  }
@@@ -458,7 -574,7 +574,7 @@@ static int i915_error_state(struct seq_
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
@@@ -642,6 -758,9 +758,9 @@@ static int i915_fbc_status(struct seq_f
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
+               case FBC_NO_OUTPUT:
+                       seq_printf(m, "no outputs");
+                       break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
@@@ -675,15 -794,17 +794,17 @@@ static int i915_sr_status(struct seq_fi
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;
  
-       if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+       if (IS_GEN5(dev))
+               sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+       else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
  
-       seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-                  "disabled");
+       seq_printf(m, "self-refresh: %s\n",
+                  sr_enabled ? "enabled" : "disabled");
  
        return 0;
  }
@@@ -694,10 -815,16 +815,16 @@@ static int i915_emon_status(struct seq_
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
  
        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
+       mutex_unlock(&dev->struct_mutex);
  
        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
@@@ -718,6 -845,68 +845,68 @@@ static int i915_gfxec(struct seq_file *
        return 0;
  }
  
+ static int i915_opregion(struct seq_file *m, void *unused)
+ {
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       if (opregion->header)
+               seq_write(m, opregion->header, OPREGION_SIZE);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
+ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+ {
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_fbdev *ifbdev;
+       struct intel_framebuffer *fb;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+       if (ret)
+               return ret;
+       ifbdev = dev_priv->fbdev;
+       fb = to_intel_framebuffer(ifbdev->helper.fb);
+       seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+                  fb->base.width,
+                  fb->base.height,
+                  fb->base.depth,
+                  fb->base.bits_per_pixel);
+       describe_obj(m, to_intel_bo(fb->obj));
+       seq_printf(m, "\n");
+       list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+               if (&fb->base == ifbdev->helper.fb)
+                       continue;
+               seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+                          fb->base.width,
+                          fb->base.height,
+                          fb->base.depth,
+                          fb->base.bits_per_pixel);
+               describe_obj(m, to_intel_bo(fb->obj));
+               seq_printf(m, "\n");
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       return 0;
+ }
  static int
  i915_wedged_open(struct inode *inode,
                 struct file *filp)
@@@ -741,6 -930,9 +930,9 @@@ i915_wedged_read(struct file *filp
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));
  
+       if (len > sizeof (buf))
+               len = sizeof (buf);
        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
  }
  
@@@ -770,7 -962,7 +962,7 @@@ i915_wedged_write(struct file *filp
  
        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }
  
@@@ -782,7 -974,6 +974,7 @@@ static const struct file_operations i91
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
 +      .llseek = default_llseek,
  };
  
  /* As the drm_debugfs_init() routines are called before dev->dev_private is
@@@ -824,9 -1015,13 +1016,13 @@@ static int i915_wedged_create(struct de
  }
  
  static struct drm_info_list i915_debugfs_list[] = {
+       {"i915_capabilities", i915_capabilities, 0, 0},
+       {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+       {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
+       {"i915_opregion", i915_opregion, 0},
+       {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
  };
  #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
  
diff --combined drivers/gpu/drm/i915/i915_drv.c
@@@ -32,6 -32,7 +32,7 @@@
  #include "drm.h"
  #include "i915_drm.h"
  #include "i915_drv.h"
+ #include "intel_drv.h"
  
  #include <linux/console.h>
  #include "drm_crtc_helper.h"
@@@ -61,86 -62,110 +62,110 @@@ extern int intel_agp_enabled
        .driver_data = (unsigned long) info }
  
  static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  
  static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .is_i8xx = 1,
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  
  static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .gen = 2, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  
  static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .is_i8xx = 1,
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  
  static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_i9xx = 1,  .is_mobile = 1,
+       .gen = 3, .is_mobile = 1,
        .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
  };
  static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
  };
  static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
  };
  
  static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 4, .is_broadwater = 1,
        .has_hotplug = 1,
+       .has_overlay = 1,
  };
  
  static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+       .gen = 4, .is_crestline = 1,
        .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .supports_tv = 1,
  };
  
  static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+       .gen = 3, .is_g33 = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
  };
  
  static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
  };
  
  static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+       .gen = 4, .is_g4x = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .supports_tv = 1,
+       .has_bsd_ring = 1,
  };
  
  static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
  };
  
  static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 5,
        .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
  };
  
  static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 5, .is_mobile = 1,
        .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
  };
  
  static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 6,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
  };
  
  static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+       .gen = 6, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
  };
  
  static const struct pci_device_id pciidlist[] = {             /* aka */
@@@ -237,7 -262,7 +262,7 @@@ static int i915_drm_freeze(struct drm_d
  
        i915_save_state(dev);
  
-       intel_opregion_free(dev, 1);
+       intel_opregion_fini(dev);
  
        /* Modeset on resume, not lid events */
        dev_priv->modeset_on_lid = 0;
@@@ -258,6 -283,8 +283,8 @@@ int i915_suspend(struct drm_device *dev
        if (state.event == PM_EVENT_PRETHAW)
                return 0;
  
+       drm_kms_helper_poll_disable(dev);
        error = i915_drm_freeze(dev);
        if (error)
                return error;
@@@ -277,8 -304,7 +304,7 @@@ static int i915_drm_thaw(struct drm_dev
        int error = 0;
  
        i915_restore_state(dev);
-       intel_opregion_init(dev, 1);
+       intel_opregion_setup(dev);
  
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_helper_resume_force_mode(dev);
        }
  
+       intel_opregion_init(dev);
        dev_priv->modeset_on_lid = 0;
  
        return error;
  
  int i915_resume(struct drm_device *dev)
  {
+       int ret;
        if (pci_enable_device(dev->pdev))
                return -EIO;
  
        pci_set_master(dev->pdev);
  
-       return i915_drm_thaw(dev);
+       ret = i915_drm_thaw(dev);
+       if (ret)
+               return ret;
+       drm_kms_helper_poll_enable(dev);
+       return 0;
+ }
+ static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (IS_I85X(dev))
+               return -ENODEV;
+       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+       if (IS_I830(dev) || IS_845G(dev)) {
+               I915_WRITE(DEBUG_RESET_I830,
+                          DEBUG_RESET_DISPLAY |
+                          DEBUG_RESET_RENDER |
+                          DEBUG_RESET_FULL);
+               POSTING_READ(DEBUG_RESET_I830);
+               msleep(1);
+               I915_WRITE(DEBUG_RESET_I830, 0);
+               POSTING_READ(DEBUG_RESET_I830);
+       }
+       msleep(1);
+       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+       return 0;
+ }
+ static int i965_reset_complete(struct drm_device *dev)
+ {
+       u8 gdrst;
+       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+       return gdrst & 0x1;
+ }
+ static int i965_do_reset(struct drm_device *dev, u8 flags)
+ {
+       u8 gdrst;
+       /*
+        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+        * well as the reset bit (GR/bit 0).  Setting the GR bit
+        * triggers the reset; when done, the hardware will clear it.
+        */
+       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+       pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+       return wait_for(i965_reset_complete(dev), 500);
+ }
+ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+       return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
  }
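Each reset helper above polls a status bit with a bounded timeout via wait_for(cond, ms). A userspace sketch of the shape of such a macro, assuming GNU C statement expressions; this is an illustration, not the driver's actual definition:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    static long long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    /* Poll COND for up to TIMEOUT_MS; 0 on success, -ETIMEDOUT on expiry. */
    #define WAIT_FOR(COND, TIMEOUT_MS) ({                              \
            long long __end = now_ms() + (TIMEOUT_MS);                 \
            int __ret = 0;                                             \
            while (!(COND)) {                                          \
                    if (now_ms() > __end) {                            \
                            __ret = -ETIMEDOUT;                        \
                            break;                                     \
                    }                                                  \
                    nanosleep(&(struct timespec){ 0, 1000000 }, NULL); \
            }                                                          \
            __ret;                                                     \
    })

    int main(void)
    {
            volatile int done = 1;          /* an already-set "status bit" */
            printf("%d\n", WAIT_FOR(done, 500));
            return 0;
    }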
  
  /**
   *   - re-init interrupt state
   *   - re-init display
   */
- int i965_reset(struct drm_device *dev, u8 flags)
+ int i915_reset(struct drm_device *dev, u8 flags)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       unsigned long timeout;
-       u8 gdrst;
        /*
         * We really should only reset the display subsystem if we actually
         * need to
         */
        bool need_display = true;
+       int ret;
  
        mutex_lock(&dev->struct_mutex);
  
-       /*
-        * Clear request list
-        */
-       i915_gem_retire_requests(dev);
-       if (need_display)
-               i915_save_display(dev);
-       if (IS_I965G(dev) || IS_G4X(dev)) {
-               /*
-                * Set the domains we want to reset, then the reset bit (bit 0).
-                * Clear the reset bit after a while and wait for hardware status
-                * bit (bit 1) to be set
-                */
-               pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-               pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
-               udelay(50);
-               pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-               /* ...we don't want to loop forever though, 500ms should be plenty */
-              timeout = jiffies + msecs_to_jiffies(500);
-               do {
-                       udelay(100);
-                       pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-               } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-               if (gdrst & 0x1) {
-                       WARN(true, "i915: Failed to reset chip\n");
-                       mutex_unlock(&dev->struct_mutex);
-                       return -EIO;
-               }
-       } else {
-               DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+       i915_gem_reset(dev);
+       ret = -ENODEV;
+       if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+               DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+       } else switch (INTEL_INFO(dev)->gen) {
+       case 5:
+               ret = ironlake_do_reset(dev, flags);
+               break;
+       case 4:
+               ret = i965_do_reset(dev, flags);
+               break;
+       case 2:
+               ret = i8xx_do_reset(dev, flags);
+               break;
+       }
+       dev_priv->last_gpu_reset = get_seconds();
+       if (ret) {
+               DRM_ERROR("Failed to reset chip.\n");
                mutex_unlock(&dev->struct_mutex);
-               return -ENODEV;
+               return ret;
        }
  
        /* Ok, now get things going again... */
                mutex_lock(&dev->struct_mutex);
        }
  
+       mutex_unlock(&dev->struct_mutex);
        /*
-        * Display needs restore too...
+        * Perform a full modeset as on later generations, e.g. Ironlake, we may
+        * need to retrain the display link and cannot just restore the register
+        * values.
         */
-       if (need_display)
-               i915_restore_display(dev);
+       if (need_display) {
+               mutex_lock(&dev->mode_config.mutex);
+               drm_helper_resume_force_mode(dev);
+               mutex_unlock(&dev->mode_config.mutex);
+       }
  
-       mutex_unlock(&dev->struct_mutex);
        return 0;
  }
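i915_reset() itself only throttles repeated resets and dispatches to the per-generation helper. A compressed standalone sketch of that control flow, with time(NULL) standing in for get_seconds() and stub returns in place of the real reset routines:

    #include <stdio.h>
    #include <time.h>

    static time_t last_gpu_reset;

    static int gpu_reset(int gen)
    {
            int ret;

            /* A GPU that hangs again within 5 seconds of a reset is
             * declared wedged instead of livelocking in a hang/reset
             * cycle. */
            if (time(NULL) - last_gpu_reset < 5) {
                    fprintf(stderr, "GPU hanging too fast, declaring wedged!\n");
                    return -1;
            }

            switch (gen) {          /* one reset routine per generation */
            case 5: ret = 0; break; /* ironlake_do_reset() */
            case 4: ret = 0; break; /* i965_do_reset() */
            case 2: ret = 0; break; /* i8xx_do_reset() */
            default: ret = -1;      /* no known reset method */
            }
            last_gpu_reset = time(NULL);
            return ret;
    }

    int main(void)
    {
            return gpu_reset(5);
    }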
  
@@@ -524,8 -610,6 +610,6 @@@ static struct drm_driver driver = 
        .irq_uninstall = i915_driver_irq_uninstall,
        .irq_handler = i915_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
  #if defined(CONFIG_DEBUG_FS)
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = i915_compat_ioctl,
  #endif
 +               .llseek = noop_llseek,
        },
  
        .pci_driver = {
@@@ -37,7 -37,9 +37,9 @@@
  #include <linux/intel-gtt.h>
  
  static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
- static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+ static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                                 bool pipelined);
  static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
  static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
  static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@@ -46,7 -48,8 +48,8 @@@ static int i915_gem_object_set_cpu_read
                                                     uint64_t offset,
                                                     uint64_t size);
  static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
- static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+                                         bool interruptible);
  static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
  static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@@ -55,9 -58,111 +58,111 @@@ static int i915_gem_phys_pwrite(struct 
                                struct drm_file *file_priv);
  static void i915_gem_free_object_tail(struct drm_gem_object *obj);
  
+ static int
+ i915_gem_object_get_pages(struct drm_gem_object *obj,
+                         gfp_t gfpmask);
+ static void
+ i915_gem_object_put_pages(struct drm_gem_object *obj);
  static LIST_HEAD(shrink_list);
  static DEFINE_SPINLOCK(shrink_list_lock);
  
+ /* some bookkeeping */
+ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+                                 size_t size)
+ {
+       dev_priv->mm.object_count++;
+       dev_priv->mm.object_memory += size;
+ }
+ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+                                    size_t size)
+ {
+       dev_priv->mm.object_count--;
+       dev_priv->mm.object_memory -= size;
+ }
+ static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+                                 size_t size)
+ {
+       dev_priv->mm.gtt_count++;
+       dev_priv->mm.gtt_memory += size;
+ }
+ static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+                                    size_t size)
+ {
+       dev_priv->mm.gtt_count--;
+       dev_priv->mm.gtt_memory -= size;
+ }
+ static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+                                 size_t size)
+ {
+       dev_priv->mm.pin_count++;
+       dev_priv->mm.pin_memory += size;
+ }
+ static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+                                    size_t size)
+ {
+       dev_priv->mm.pin_count--;
+       dev_priv->mm.pin_memory -= size;
+ }
+ int
+ i915_gem_check_is_wedged(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct completion *x = &dev_priv->error_completion;
+       unsigned long flags;
+       int ret;
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+       ret = wait_for_completion_interruptible(x);
+       if (ret)
+               return ret;
+       /* Success, we reset the GPU! */
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+       /* GPU is hung, bump the completion count to account for
+        * the token we just consumed so that we never hit zero and
+        * end up waiting upon a subsequent completion event that
+        * will never happen.
+        */
+       spin_lock_irqsave(&x->wait.lock, flags);
+       x->done++;
+       spin_unlock_irqrestore(&x->wait.lock, flags);
+       return -EIO;
+ }
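The completion bump above is a token-accounting trick: a woken waiter that finds the GPU still wedged returns its token, so later waiters never block on a wakeup that was already consumed. A userspace analog, with a POSIX semaphore standing in for struct completion (illustrative only):

    #include <semaphore.h>
    #include <stdio.h>

    static sem_t error_completion;
    static int wedged = 1;

    static int check_is_wedged(void)
    {
            if (!wedged)
                    return 0;

            sem_wait(&error_completion);    /* consume one wakeup token */

            if (!wedged)
                    return 0;               /* a reset completed while we slept */

            /* Still hung: return the token so the count never drops
             * below what later waiters need for their own wakeups. */
            sem_post(&error_completion);
            return -1;                      /* -EIO in the driver */
    }

    int main(void)
    {
            sem_init(&error_completion, 0, 1);  /* pretend one wakeup is pending */
            printf("%d\n", check_is_wedged());
            return 0;
    }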
+ static int i915_mutex_lock_interruptible(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EAGAIN;
+       }
+       WARN_ON(i915_verify_lists(dev));
+       return 0;
+ }
  static inline bool
  i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
  {
        return obj_priv->gtt_space &&
                !obj_priv->active &&
                obj_priv->pin_count == 0;
  }
  
- int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ int i915_gem_do_init(struct drm_device *dev,
+                    unsigned long start,
                     unsigned long end)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);
  
-       dev->gtt_total = (uint32_t) (end - start);
+       dev_priv->mm.gtt_total = end - start;
  
        return 0;
  }
@@@ -103,14 -209,16 +209,16 @@@ in
  i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
  {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
  
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
  
-       args->aper_size = dev->gtt_total;
-       args->aper_available_size = (args->aper_size -
-                                    atomic_read(&dev->pin_memory));
+       mutex_lock(&dev->struct_mutex);
+       args->aper_size = dev_priv->mm.gtt_total;
+       args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+       mutex_unlock(&dev->struct_mutex);
  
        return 0;
  }
@@@ -136,12 -244,17 +244,17 @@@ i915_gem_create_ioctl(struct drm_devic
                return -ENOMEM;
  
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
+               drm_gem_object_release(obj);
+               i915_gem_info_remove_obj(dev->dev_private, obj->size);
+               kfree(obj);
                return ret;
        }
  
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference(obj);
+       trace_i915_gem_object_create(obj);
        args->handle = handle;
        return 0;
  }
@@@ -152,19 -265,14 +265,14 @@@ fast_shmem_read(struct page **pages
                char __user *data,
                int length)
  {
-       char __iomem *vaddr;
-       int unwritten;
+       char *vaddr;
+       int ret;
  
 -      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 +      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 -      kunmap_atomic(vaddr, KM_USER0);
 +      kunmap_atomic(vaddr);
  
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return ret;
  }
  
  static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@@ -258,22 -366,10 +366,10 @@@ i915_gem_shmem_pread_fast(struct drm_de
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret != 0)
-               goto fail_unlock;
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0)
-               goto fail_put_pages;
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
  
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
  
-               ret = fast_shmem_read(obj_priv->pages,
-                                     page_base, page_offset,
-                                     user_data, page_length);
-               if (ret)
-                       goto fail_put_pages;
+               if (fast_shmem_read(obj_priv->pages,
+                                   page_base, page_offset,
+                                   user_data, page_length))
+                       return -EFAULT;
  
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
  
- fail_put_pages:
-       i915_gem_object_put_pages(obj);
- fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
  }
  
  static int
@@@ -367,31 -457,28 +457,28 @@@ i915_gem_shmem_pread_slow(struct drm_de
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
  
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
  
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
-               goto fail_put_user_pages;
+               goto out;
        }
  
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_get_pages_or_evict(obj);
+       ret = i915_gem_object_set_cpu_read_domain_range(obj,
+                                                       args->offset,
+                                                       args->size);
        if (ret)
-               goto fail_unlock;
+               goto out;
  
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0)
-               goto fail_put_pages;
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
                offset += page_length;
        }
  
- fail_put_pages:
-       i915_gem_object_put_pages(obj);
- fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
- fail_put_user_pages:
+ out:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
@@@ -462,37 -545,64 +545,64 @@@ i915_gem_pread_ioctl(struct drm_device 
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       int ret;
+       int ret = 0;
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
  
        /* Bounds check source.  */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
-               goto err;
+               goto out;
        }
  
+       if (args->size == 0)
+               goto out;
        if (!access_ok(VERIFY_WRITE,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
-               goto err;
+               goto out;
        }
  
-       if (i915_gem_object_needs_bit17_swizzle(obj)) {
-               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-       } else {
-               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-               if (ret != 0)
-                       ret = i915_gem_shmem_pread_slow(dev, obj, args,
-                                                       file_priv);
+       ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+                                      args->size);
+       if (ret) {
+               ret = -EFAULT;
+               goto out;
        }
  
- err:
-       drm_gem_object_unreference_unlocked(obj);
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
+               goto out;
+       ret = i915_gem_object_set_cpu_read_domain_range(obj,
+                                                       args->offset,
+                                                       args->size);
+       if (ret)
+               goto out_put;
+       ret = -EFAULT;
+       if (!i915_gem_object_needs_bit17_swizzle(obj))
+               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+       if (ret == -EFAULT)
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ out_put:
+       i915_gem_object_put_pages(obj);
+ out:
+       drm_gem_object_unreference(obj);
+ unlock:
+       mutex_unlock(&dev->struct_mutex);
        return ret;
  }
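The reworked pread ioctl above tries a fast route first (atomic kmap plus __copy_to_user_inatomic, which fails with -EFAULT if the user page is not resident) and only then falls back to the slow route that may sleep and pin user pages. The dispatch shape, reduced to a standalone sketch with stub copy routines in place of the driver paths:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    static int read_fast(char *dst, const char *src, size_t len)
    {
            (void)dst; (void)src; (void)len;
            return -EFAULT;                 /* simulate a non-resident user page */
    }

    static int read_slow(char *dst, const char *src, size_t len)
    {
            (void)dst; (void)src; (void)len;
            return 0;                       /* slow path pinned the pages and copied */
    }

    static int do_pread(char *dst, const char *src, size_t len, int bit17_swizzle)
    {
            int ret = -EFAULT;

            if (!bit17_swizzle)             /* swizzled objects always go slow */
                    ret = read_fast(dst, src, len);
            if (ret == -EFAULT)
                    ret = read_slow(dst, src, len);
            return ret;
    }

    int main(void)
    {
            char buf[16];
            printf("%d\n", do_pread(buf, "data", 4, 0));
            return 0;
    }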
  
@@@ -509,13 -619,11 +619,11 @@@ fast_user_write(struct io_mapping *mapp
        char *vaddr_atomic;
        unsigned long unwritten;
  
 -      vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
 +      vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
 -      io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
 +      io_mapping_unmap_atomic(vaddr_atomic);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return unwritten;
  }
  
  /* Here's the write path which can sleep for
@@@ -548,18 -656,14 +656,14 @@@ fast_shmem_write(struct page **pages
                 char __user *data,
                 int length)
  {
-       char __iomem *vaddr;
-       unsigned long unwritten;
+       char *vaddr;
+       int ret;
  
 -      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 +      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 -      kunmap_atomic(vaddr, KM_USER0);
 +      kunmap_atomic(vaddr);
  
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return ret;
  }
  
  /**
@@@ -577,22 -681,10 +681,10 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_pin(obj, 0);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-       if (ret)
-               goto fail;
        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
  
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
  
-               ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
-                                      page_offset, user_data, page_length);
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-               if (ret)
-                       goto fail;
+               if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+                                   page_offset, user_data, page_length))
+                       return -EFAULT;
  
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
  
- fail:
-       i915_gem_object_unpin(obj);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
  }
  
  /**
@@@ -665,27 -752,24 +752,24 @@@ i915_gem_gtt_pwrite_slow(struct drm_dev
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
  
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
  
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }
  
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_pin(obj, 0);
-       if (ret)
-               goto out_unlock;
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
-               goto out_unpin_object;
+               goto out_unpin_pages;
  
        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
                data_ptr += page_length;
        }
  
- out_unpin_object:
-       i915_gem_object_unpin(obj);
- out_unlock:
-       mutex_unlock(&dev->struct_mutex);
  out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
@@@ -747,21 -827,10 +827,10 @@@ i915_gem_shmem_pwrite_fast(struct drm_d
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret != 0)
-               goto fail_unlock;
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret != 0)
-               goto fail_put_pages;
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
  
-               ret = fast_shmem_write(obj_priv->pages,
+               if (fast_shmem_write(obj_priv->pages,
                                       page_base, page_offset,
-                                      user_data, page_length);
-               if (ret)
-                       goto fail_put_pages;
+                                      user_data, page_length))
+                       return -EFAULT;
  
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
  
- fail_put_pages:
-       i915_gem_object_put_pages(obj);
- fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
  }
  
  /**
@@@ -833,30 -896,26 +896,26 @@@ i915_gem_shmem_pwrite_slow(struct drm_d
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
  
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
  
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
-               goto fail_put_user_pages;
+               goto out;
        }
  
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_get_pages_or_evict(obj);
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret)
-               goto fail_unlock;
+               goto out;
  
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret != 0)
-               goto fail_put_pages;
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
                offset += page_length;
        }
  
- fail_put_pages:
-       i915_gem_object_put_pages(obj);
- fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
- fail_put_user_pages:
+ out:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);
   */
  int
  i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+                     struct drm_file *file)
  {
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
  
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+       obj = drm_gem_object_lookup(dev, file, args->handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
  
        /* Bounds check destination. */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
-               goto err;
+               goto out;
        }
  
+       if (args->size == 0)
+               goto out;
        if (!access_ok(VERIFY_READ,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
-               goto err;
+               goto out;
+       }
+       ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+                                     args->size);
+       if (ret) {
+               ret = -EFAULT;
+               goto out;
        }
  
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
-               ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+               ret = i915_gem_phys_pwrite(dev, obj, args, file);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0 &&
+                obj_priv->gtt_space &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
-               if (ret == -EFAULT) {
-                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
-                                                      file_priv);
-               }
-       } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
-               ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+               ret = i915_gem_object_pin(obj, 0);
+               if (ret)
+                       goto out;
+               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+               if (ret)
+                       goto out_unpin;
+               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+               if (ret == -EFAULT)
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
+ out_unpin:
+               i915_gem_object_unpin(obj);
        } else {
-               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
-               if (ret == -EFAULT) {
-                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
-                                                        file_priv);
-               }
-       }
+               ret = i915_gem_object_get_pages_or_evict(obj);
+               if (ret)
+                       goto out;
  
- #if WATCH_PWRITE
-       if (ret)
-               DRM_INFO("pwrite failed %d\n", ret);
- #endif
+               ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+               if (ret)
+                       goto out_put;
  
- err:
-       drm_gem_object_unreference_unlocked(obj);
+               ret = -EFAULT;
+               if (!i915_gem_object_needs_bit17_swizzle(obj))
+                       ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+               if (ret == -EFAULT)
+                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ out_put:
+               i915_gem_object_put_pages(obj);
+       }
+ out:
+       drm_gem_object_unreference(obj);
+ unlock:
+       mutex_unlock(&dev->struct_mutex);
        return ret;
  }
  
@@@ -1014,19 -1101,19 +1101,19 @@@ i915_gem_set_domain_ioctl(struct drm_de
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;
  
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
  
-       mutex_lock(&dev->struct_mutex);
        intel_mark_busy(dev, obj);
  
- #if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
-                obj, obj->size, read_domains, write_domain);
- #endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
  
-       
        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
  
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
  }
@@@ -1069,30 -1156,27 +1156,27 @@@ i915_gem_sw_finish_ioctl(struct drm_dev
  {
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
        int ret = 0;
  
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
  
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
  
- #if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
-                __func__, args->handle, obj, obj->size);
- #endif
-       obj_priv = to_intel_bo(obj);
        /* Pinned buffers may be scanout, so flush the cache */
-       if (obj_priv->pin_count)
+       if (to_intel_bo(obj)->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);
  
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
  }
@@@ -1181,13 -1265,13 +1265,13 @@@ int i915_gem_fault(struct vm_area_struc
  
        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj);
+               ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        goto unlock;
        }
  
        if (i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
  
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
@@@ -1246,7 -1330,7 +1330,7 @@@ i915_gem_create_mmap_offset(struct drm_
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOMEM;
+               ret = -ENOSPC;
                goto out_free_list;
        }
  
        }
  
        list->hash.key = list->file_offset_node->start;
-       if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
                DRM_ERROR("failed to add to map hash\n");
-               ret = -ENOMEM;
                goto out_free_mm;
        }
  
@@@ -1345,14 -1429,14 +1429,14 @@@ i915_gem_get_gtt_alignment(struct drm_g
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+       if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;
  
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       if (IS_I9XX(dev))
+       if (INTEL_INFO(dev)->gen == 3)
                start = 1024*1024;
        else
                start = 512*1024;
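The alignment rule above continues by rounding the fence region up to the next power of two that can hold the object. A hypothetical helper capturing the whole computation (not the kernel function itself):

    #include <stdio.h>

    static unsigned long fence_alignment(unsigned long size, int gen)
    {
            /* Pre-965: align to the smallest power-of-two fence region,
             * starting at 1MiB on gen3 and 512KiB on older parts, that
             * can contain the object. */
            unsigned long start = (gen == 3) ? 1024 * 1024 : 512 * 1024;

            while (start < size)
                    start <<= 1;
            return start;
    }

    int main(void)
    {
            printf("%lu\n", fence_alignment(3ul << 20, 3));  /* 3MiB -> 4MiB */
            return 0;
    }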
@@@ -1390,29 -1474,27 +1474,27 @@@ i915_gem_mmap_gtt_ioctl(struct drm_devi
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
  
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
  
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
  
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
  
        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
-               if (ret) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
  
        args->offset = obj_priv->mmap_offset;
         */
        if (!obj_priv->agp_mem) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
-               if (ret) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
  
+ out:
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
  }
  
- void
+ static void
  i915_gem_object_put_pages(struct drm_gem_object *obj)
  {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        obj_priv->pages = NULL;
  }
  
+ static uint32_t
+ i915_gem_next_request_seqno(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       ring->outstanding_lazy_request = true;
+       return dev_priv->next_seqno;
+ }
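i915_gem_next_request_seqno() hands out a seqno without emitting a request; the outstanding_lazy_request flag tells the first waiter to emit the real request before sleeping (see i915_do_wait_request further down). A sketch of just that bookkeeping, with illustrative types:

    #include <stdio.h>

    struct ring_state {
            int outstanding_lazy_request;
            unsigned int *next_seqno;       /* device-wide counter */
    };

    static unsigned int next_request_seqno(struct ring_state *ring)
    {
            /* Promise a request without emitting it: the returned seqno
             * is the one the eventual request will carry. */
            ring->outstanding_lazy_request = 1;
            return *ring->next_seqno;       /* bumped only when emitted */
    }

    int main(void)
    {
            unsigned int seqno = 1;
            struct ring_state ring = { 0, &seqno };

            printf("lazy seqno %u (outstanding=%d)\n",
                   next_request_seqno(&ring), ring.outstanding_lazy_request);
            return 0;
    }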
  static void
- i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ i915_gem_object_move_to_active(struct drm_gem_object *obj,
                               struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
        BUG_ON(ring == NULL);
        obj_priv->ring = ring;
  
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       list_move_tail(&obj_priv->list, &ring->active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+       list_move_tail(&obj_priv->ring_list, &ring->active_list);
        obj_priv->last_rendering_seqno = seqno;
  }
  
@@@ -1500,7 -1592,8 +1592,8 @@@ i915_gem_object_move_to_flushing(struc
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
        BUG_ON(!obj_priv->active);
-       list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+       list_del_init(&obj_priv->ring_list);
        obj_priv->last_rendering_seqno = 0;
  }
  
@@@ -1538,11 -1631,11 +1631,11 @@@ i915_gem_object_move_to_inactive(struc
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
-               list_del_init(&obj_priv->list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
        else
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       list_del_init(&obj_priv->ring_list);
  
        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
  
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
  }
  
  static void
  i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno,
+                              uint32_t flush_domains,
                               struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv, *next;
  
        list_for_each_entry_safe(obj_priv, next,
-                                &dev_priv->mm.gpu_write_list,
+                                &ring->gpu_write_list,
                                 gpu_write_list) {
                struct drm_gem_object *obj = &obj_priv->base;
  
-               if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain &&
-                   obj_priv->ring->ring_flag == ring->ring_flag) {
+               if (obj->write_domain & flush_domains) {
                        uint32_t old_write_domain = obj->write_domain;
  
                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, seqno, ring);
+                       i915_gem_object_move_to_active(obj, ring);
  
                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  }
  
  uint32_t
- i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains, struct intel_ring_buffer *ring)
+ i915_add_request(struct drm_device *dev,
+                struct drm_file *file,
+                struct drm_i915_gem_request *request,
+                struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = NULL;
-       struct drm_i915_gem_request *request;
+       struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
  
-       if (file_priv != NULL)
-               i915_file_priv = file_priv->driver_priv;
+       if (file != NULL)
+               file_priv = file->driver_priv;
  
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
-               return 0;
+       if (request == NULL) {
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return 0;
+       }
  
-       seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+       seqno = ring->add_request(dev, ring, 0);
+       ring->outstanding_lazy_request = false;
  
        request->seqno = seqno;
        request->ring = ring;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
  
-       if (i915_file_priv) {
+       if (file_priv) {
+               spin_lock(&file_priv->mm.lock);
+               request->file_priv = file_priv;
                list_add_tail(&request->client_list,
-                             &i915_file_priv->mm.request_list);
-       } else {
-               INIT_LIST_HEAD(&request->client_list);
+                             &file_priv->mm.request_list);
+               spin_unlock(&file_priv->mm.lock);
        }
  
-       /* Associate any objects on the flushing list matching the write
-        * domain we're flushing with our flush.
-        */
-       if (flush_domains != 0) 
-               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
        if (!dev_priv->mm.suspended) {
-               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               mod_timer(&dev_priv->hangcheck_timer,
+                         jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
                if (was_empty)
-                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+                       queue_delayed_work(dev_priv->wq,
+                                          &dev_priv->mm.retire_work, HZ);
        }
        return seqno;
  }
   * Ensures that all commands in the ring are finished
   * before signalling the CPU
   */
- static uint32_t
+ static void
  i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
  {
        uint32_t flush_domains = 0;
  
        /* The sampler always gets flushed on i965 (sigh) */
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  
        ring->flush(dev, ring,
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
-       return flush_domains;
  }
  
- /**
-  * Moves buffers associated only with the given active seqno from the active
-  * to inactive list, potentially freeing them.
-  */
- static void
- i915_gem_retire_request(struct drm_device *dev,
-                       struct drm_i915_gem_request *request)
+ static inline void
+ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = request->file_priv;
  
-       trace_i915_gem_request_retire(dev, request->seqno);
+       if (!file_priv)
+               return;
  
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&request->ring->active_list)) {
-               struct drm_gem_object *obj;
-               struct drm_i915_gem_object *obj_priv;
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+ }
  
-               obj_priv = list_first_entry(&request->ring->active_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj = &obj_priv->base;
+ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+                                     struct intel_ring_buffer *ring)
+ {
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
  
-               /* If the seqno being retired doesn't match the oldest in the
-                * list, then the oldest in the list must still be newer than
-                * this seqno.
-                */
-               if (obj_priv->last_rendering_seqno != request->seqno)
-                       goto out;
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
  
- #if WATCH_LRU
-               DRM_INFO("%s: retire %d moves to inactive list %p\n",
-                        __func__, request->seqno, obj);
- #endif
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
  
-               if (obj->write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else {
-                       /* Take a reference on the object so it won't be
-                        * freed while the spinlock is held.  The list
-                        * protection for this spinlock is safe when breaking
-                        * the lock like this since the next thing we do
-                        * is just get the head of the list again.
-                        */
-                       drm_gem_object_reference(obj);
-                       i915_gem_object_move_to_inactive(obj);
-                       spin_unlock(&dev_priv->mm.active_list_lock);
-                       drm_gem_object_unreference(obj);
-                       spin_lock(&dev_priv->mm.active_list_lock);
-               }
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj_priv;
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           ring_list);
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
        }
- out:
-       spin_unlock(&dev_priv->mm.active_list_lock);
  }
  
- /**
-  * Returns true if seq1 is later than seq2.
-  */
- bool
- i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+ void i915_gem_reset(struct drm_device *dev)
  {
-       return (int32_t)(seq1 - seq2) >= 0;
- }
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int i;
  
- uint32_t
- i915_get_gem_seqno(struct drm_device *dev,
-                  struct intel_ring_buffer *ring)
- {
-       return ring->get_gem_seqno(dev, ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+       /* Remove anything from the flushing lists. The GPU cache is likely
+        * to be lost on reset along with the data, so simply move the
+        * lost bo to the inactive list.
+        */
+       while (!list_empty(&dev_priv->mm.flushing_list)) {
+               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+                                           struct drm_i915_gem_object,
+                                           mm_list);
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
+       }
+       /* Move everything out of the GPU domains to ensure we do any
+        * necessary invalidation upon reuse.
+        */
+       list_for_each_entry(obj_priv,
+                           &dev_priv->mm.inactive_list,
+                           mm_list)
+       {
+               obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+       }
+       /* The fence registers are invalidated so clear them out */
+       for (i = 0; i < 16; i++) {
+               struct drm_i915_fence_reg *reg;
+               reg = &dev_priv->fence_regs[i];
+               if (!reg->obj)
+                       continue;
+               i915_gem_clear_fence_reg(reg->obj);
+       }
  }
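Retirement throughout this file leans on i915_seqno_passed(), whose open-coded form is being dropped from this spot but remains in use below: casting the unsigned difference to a signed type makes the comparison survive 32-bit wraparound, as long as the two seqnos are less than 2^31 apart. A standalone demonstration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            /* Signed interpretation of the unsigned difference. */
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(2, 1));
            assert(seqno_passed(1u, 0xffffffffu));   /* across the wrap */
            assert(!seqno_passed(0xffffffffu, 1u));
            return 0;
    }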
  
  /**
@@@ -1741,38 -1847,58 +1847,58 @@@ i915_gem_retire_requests_ring(struct dr
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
  
-       if (!ring->status_page.page_addr
-                       || list_empty(&ring->request_list))
+       if (!ring->status_page.page_addr ||
+           list_empty(&ring->request_list))
                return;
  
-       seqno = i915_get_gem_seqno(dev, ring);
+       WARN_ON(i915_verify_lists(dev));
  
+       seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
-               uint32_t retiring_seqno;
  
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
-               retiring_seqno = request->seqno;
  
-               if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   atomic_read(&dev_priv->mm.wedged)) {
-                       i915_gem_retire_request(dev, request);
+               if (!i915_seqno_passed(seqno, request->seqno))
+                       break;
+               trace_i915_gem_request_retire(dev, request->seqno);
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
  
-                       list_del(&request->list);
-                       list_del(&request->client_list);
-                       kfree(request);
-               } else
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_gem_object *obj;
+               struct drm_i915_gem_object *obj_priv;
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           ring_list);
+               if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                        break;
+               obj = &obj_priv->base;
+               if (obj->write_domain != 0)
+                       i915_gem_object_move_to_flushing(obj);
+               else
+                       i915_gem_object_move_to_inactive(obj);
        }
  
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
                ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
+       WARN_ON(i915_verify_lists(dev));
  }
  
  void
@@@ -1790,16 -1916,16 +1916,16 @@@ i915_gem_retire_requests(struct drm_dev
             */
            list_for_each_entry_safe(obj_priv, tmp,
                                     &dev_priv->mm.deferred_free_list,
-                                    list)
+                                    mm_list)
                    i915_gem_free_object_tail(&obj_priv->base);
        }
  
        i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
  }
  
- void
+ static void
  i915_gem_retire_work_handler(struct work_struct *work)
  {
        drm_i915_private_t *dev_priv;
                                mm.retire_work.work);
        dev = dev_priv->dev;
  
-       mutex_lock(&dev->struct_mutex);
+       /* Come back later if the device is busy... */
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+               return;
+       }
        i915_gem_retire_requests(dev);
  
        if (!dev_priv->mm.suspended &&
                (!list_empty(&dev_priv->render_ring.request_list) ||
-                       (HAS_BSD(dev) &&
-                        !list_empty(&dev_priv->bsd_ring.request_list))))
+                !list_empty(&dev_priv->bsd_ring.request_list) ||
+                !list_empty(&dev_priv->blt_ring.request_list)))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
  }
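The retire work handler now uses mutex_trylock() and re-queues itself rather than sleeping on a contended struct_mutex, so the shared workqueue is never parked behind a busy device. The same idiom in miniature, using pthreads as a stand-in:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Returns nonzero if the caller should re-queue the work after a delay. */
    static int retire_worker(void)
    {
            if (pthread_mutex_trylock(&struct_mutex) != 0) {
                    /* Device busy: come back in a second instead of
                     * blocking the workqueue on a held lock. */
                    return 1;
            }
            puts("retiring completed requests");
            pthread_mutex_unlock(&struct_mutex);
            return 0;
    }

    int main(void)
    {
            if (retire_worker())
                    puts("re-queued");
            return 0;
    }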
  
  int
  i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-               int interruptible, struct intel_ring_buffer *ring)
+                    bool interruptible, struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
        BUG_ON(seqno == 0);
  
        if (atomic_read(&dev_priv->mm.wedged))
-               return -EIO;
+               return -EAGAIN;
+       if (ring->outstanding_lazy_request) {
+               seqno = i915_add_request(dev, NULL, NULL, ring);
+               if (seqno == 0)
+                       return -ENOMEM;
+       }
+       BUG_ON(seqno == dev_priv->next_seqno);
  
-       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
  
                ring->user_irq_put(dev, ring);
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EIO;
+               ret = -EAGAIN;
  
        if (ret && ret != -ERESTARTSYS)
-               DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+               DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+                         __func__, ret, seqno, ring->get_seqno(dev, ring),
+                         dev_priv->next_seqno);
  
        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
   */
  static int
  i915_wait_request(struct drm_device *dev, uint32_t seqno,
-               struct intel_ring_buffer *ring)
+                 struct intel_ring_buffer *ring)
  {
        return i915_do_wait_request(dev, seqno, 1, ring);
  }
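
Both wait paths above poll i915_seqno_passed(), which boils down to a wraparound-safe signed comparison of 32-bit sequence numbers. A standalone illustration of that semantics (seqno_passed is a local name):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Essence of the check used by the wait loops: seqnos increase
     * monotonically, so the signed difference survives the 2^32 wrap. */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        assert(seqno_passed(6, 5));
        assert(!seqno_passed(5, 6));
        assert(seqno_passed(1, 0xffffffffu));   /* correct across wrap */
        return 0;
    }
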
  
+ static void
+ i915_gem_flush_ring(struct drm_device *dev,
+                   struct drm_file *file_priv,
+                   struct intel_ring_buffer *ring,
+                   uint32_t invalidate_domains,
+                   uint32_t flush_domains)
+ {
+       ring->flush(dev, ring, invalidate_domains, flush_domains);
+       i915_gem_process_flushing_list(dev, flush_domains, ring);
+ }
+ 
  static void
  i915_gem_flush(struct drm_device *dev,
+              struct drm_file *file_priv,
               uint32_t invalidate_domains,
-              uint32_t flush_domains)
+              uint32_t flush_domains,
+              uint32_t flush_rings)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
-       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-                       invalidate_domains,
-                       flush_domains);
-       if (HAS_BSD(dev))
-               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-                               invalidate_domains,
-                               flush_domains);
+       if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+               if (flush_rings & RING_RENDER)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->render_ring,
+                                           invalidate_domains, flush_domains);
+               if (flush_rings & RING_BSD)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->bsd_ring,
+                                           invalidate_domains, flush_domains);
+               if (flush_rings & RING_BLT)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->blt_ring,
+                                           invalidate_domains, flush_domains);
+       }
  }
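
i915_gem_flush() now takes a flush_rings bitmask and fans the flush out only to the rings named in it, and only when a GPU domain is involved at all. A sketch of that dispatch (RING_* and GPU_DOMAINS here are illustrative stand-ins, not the driver's constants):

    #include <stdint.h>
    #include <stdio.h>

    enum { RING_RENDER = 0x1, RING_BSD = 0x2, RING_BLT = 0x4 };
    #define GPU_DOMAINS 0x3eu  /* illustrative: the non-CPU, non-GTT bits */

    static void flush_ring(const char *name) { printf("flush %s\n", name); }

    static void gem_flush(uint32_t invalidate, uint32_t flush, uint32_t rings)
    {
        if (((invalidate | flush) & GPU_DOMAINS) == 0)
            return;                       /* no GPU domain involved */
        if (rings & RING_RENDER) flush_ring("render");
        if (rings & RING_BSD)    flush_ring("bsd");
        if (rings & RING_BLT)    flush_ring("blt");
    }

    int main(void)
    {
        gem_flush(0, 0x2, RING_RENDER | RING_BLT);  /* render and blt */
        return 0;
    }
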
  
  /**
   * Ensures that all rendering to the object has completed and the object is
   * safe to unbind from the GTT or access from the CPU.
   */
  static int
- i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+                              bool interruptible)
  {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
  
        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
        if (obj_priv->active) {
- #if WATCH_BUF
-               DRM_INFO("%s: object %p wait for seqno %08x\n",
-                         __func__, obj, obj_priv->last_rendering_seqno);
- #endif
-               ret = i915_wait_request(dev,
-                               obj_priv->last_rendering_seqno, obj_priv->ring);
-               if (ret != 0)
+               ret = i915_do_wait_request(dev,
+                                          obj_priv->last_rendering_seqno,
+                                          interruptible,
+                                          obj_priv->ring);
+               if (ret)
                        return ret;
        }
  
  i915_gem_object_unbind(struct drm_gem_object *obj)
  {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
  
- #if WATCH_BUF
-       DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-       DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
- #endif
        if (obj_priv->gtt_space == NULL)
                return 0;
  
         * should be safe and we need to cleanup or else we might
         * cause memory corruption through use-after-free.
         */
+       if (ret) {
+               i915_gem_clflush_object(obj);
+               obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+       }
  
        /* release the fence reg _after_ flushing */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
  
-       if (obj_priv->agp_mem != NULL) {
-               drm_unbind_agp(obj_priv->agp_mem);
-               drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-               obj_priv->agp_mem = NULL;
-       }
+       drm_unbind_agp(obj_priv->agp_mem);
+       drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  
        i915_gem_object_put_pages(obj);
        BUG_ON(obj_priv->pages_refcount);
  
-       if (obj_priv->gtt_space) {
-               atomic_dec(&dev->gtt_count);
-               atomic_sub(obj->size, &dev->gtt_memory);
-               drm_mm_put_block(obj_priv->gtt_space);
-               obj_priv->gtt_space = NULL;
-       }
+       i915_gem_info_remove_gtt(dev_priv, obj->size);
+       list_del_init(&obj_priv->mm_list);
  
-       /* Remove ourselves from the LRU list if present. */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       if (!list_empty(&obj_priv->list))
-               list_del_init(&obj_priv->list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       drm_mm_put_block(obj_priv->gtt_space);
+       obj_priv->gtt_space = NULL;
+       obj_priv->gtt_offset = 0;
  
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
        return ret;
  }
  
+ static int i915_ring_idle(struct drm_device *dev,
+                         struct intel_ring_buffer *ring)
+ {
+       if (list_empty(&ring->gpu_write_list))
+               return 0;
+       i915_gem_flush_ring(dev, NULL, ring,
+                           I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       return i915_wait_request(dev,
+                                i915_gem_next_request_seqno(dev, ring),
+                                ring);
+ }
+ 
  int
  i915_gpu_idle(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno1, seqno2;
        int ret;
  
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev) ||
-                       list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
+                      list_empty(&dev_priv->bsd_ring.active_list) &&
+                      list_empty(&dev_priv->blt_ring.active_list));
        if (lists_empty)
                return 0;
  
        /* Flush everything onto the inactive list. */
-       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-                       &dev_priv->render_ring);
-       if (seqno1 == 0)
-               return -ENOMEM;
-       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
-       if (HAS_BSD(dev)) {
-               seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-                               &dev_priv->bsd_ring);
-               if (seqno2 == 0)
-                       return -ENOMEM;
+       ret = i915_ring_idle(dev, &dev_priv->render_ring);
+       if (ret)
+               return ret;
  
-               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+       if (ret)
+               return ret;
  
+       ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+       if (ret)
+               return ret;
  
-       return ret;
+       return 0;
  }
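
i915_gpu_idle() thus becomes a simple loop in spirit: skip entirely when every list is already empty, otherwise flush-then-wait each ring in turn and abort on the first error. A minimal model with stand-in types (ring_idle compresses the flush + wait_request pair into a comment):

    #include <stdbool.h>
    #include <stddef.h>

    struct ring { bool has_pending_writes; };

    static int ring_idle(struct ring *r)
    {
        if (!r->has_pending_writes)
            return 0;                 /* nothing queued: skip flush+wait */
        /* flush(ring, GPU_DOMAINS, GPU_DOMAINS);
         * wait_request(ring, next_seqno(ring)); */
        r->has_pending_writes = false;
        return 0;
    }

    static int gpu_idle(struct ring *rings, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            int ret = ring_idle(&rings[i]);
            if (ret)
                return ret;           /* first failure aborts the idle */
        }
        return 0;
    }

    int main(void)
    {
        struct ring rings[3] = { { true }, { false }, { true } };
        return gpu_idle(rings, 3);
    }
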
  
- int
+ static int
  i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
  {
@@@ -2241,7 -2392,8 +2392,8 @@@ static void i830_write_fence_reg(struc
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  }
  
- static int i915_find_fence_reg(struct drm_device *dev)
+ static int i915_find_fence_reg(struct drm_device *dev,
+                              bool interruptible)
  {
        struct drm_i915_fence_reg *reg = NULL;
        struct drm_i915_gem_object *obj_priv = NULL;
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
        drm_gem_object_reference(obj);
-       ret = i915_gem_object_put_fence_reg(obj);
+       ret = i915_gem_object_put_fence_reg(obj, interruptible);
        drm_gem_object_unreference(obj);
        if (ret != 0)
                return ret;
   * and tiling format.
   */
  int
- i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
  {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
                break;
        }
  
-       ret = i915_find_fence_reg(dev);
+       ret = i915_find_fence_reg(dev, interruptible);
        if (ret < 0)
                return ret;
  
@@@ -2421,15 -2574,19 +2574,19 @@@ i915_gem_clear_fence_reg(struct drm_gem
   * i915_gem_object_put_fence_reg - waits on outstanding fenced access
   * to the buffer to finish, and then resets the fence register.
   * @obj: tiled object holding a fence register.
+  * @interruptible: whether the wait upon the fence is interruptible
   *
   * Zeroes out the fence register itself and clears out the associated
   * data structures in dev_priv and obj_priv.
   */
  int
- i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
  {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg;
  
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
-       if (!IS_I965G(dev)) {
+       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       if (reg->gpu) {
                int ret;
  
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               if (ret != 0)
+               ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+               if (ret)
                        return ret;
  
-               ret = i915_gem_object_wait_rendering(obj);
-               if (ret != 0)
+               ret = i915_gem_object_wait_rendering(obj, interruptible);
+               if (ret)
                        return ret;
+               reg->gpu = false;
        }
  
        i915_gem_object_flush_gtt_write_domain(obj);
-       i915_gem_clear_fence_reg (obj);
+       i915_gem_clear_fence_reg(obj);
  
        return 0;
  }
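
The new reg->gpu flag means the expensive flush-and-wait is paid only when the fenced region was actually used by the GPU, and clearing it afterwards lets repeated puts skip the wait. A sketch of that ordering with hypothetical helper names (flush_gpu_writes/wait_rendering are not the driver's functions):

    #include <stdbool.h>

    struct fence_reg { bool gpu; };

    /* stand-ins for the driver's flush and wait primitives */
    static int flush_gpu_writes(void) { return 0; }
    static int wait_rendering(bool interruptible)
    {
        (void)interruptible;
        return 0;
    }

    static int put_fence(struct fence_reg *reg, bool interruptible)
    {
        if (reg->gpu) {
            int ret = flush_gpu_writes();
            if (ret)
                return ret;
            ret = wait_rendering(interruptible);
            if (ret)
                return ret;
            reg->gpu = false;     /* later puts can skip the GPU wait */
        }
        /* then flush GTT writes and clear the register itself */
        return 0;
    }

    int main(void)
    {
        struct fence_reg reg = { .gpu = true };
        return put_fence(&reg, true);
    }
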
@@@ -2490,7 -2650,7 +2650,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->size > dev->gtt_total) {
+       if (obj->size > dev_priv->mm.gtt_total) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }
   search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
-       if (free_space != NULL) {
+       if (free_space != NULL)
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
-               if (obj_priv->gtt_space != NULL)
-                       obj_priv->gtt_offset = obj_priv->gtt_space->start;
-       }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
- #if WATCH_LRU
-               DRM_INFO("%s: GTT full, evicting something\n", __func__);
- #endif
                ret = i915_gem_evict_something(dev, obj->size, alignment);
                if (ret)
                        return ret;
                goto search_free;
        }
  
- #if WATCH_BUF
-       DRM_INFO("Binding object of size %zd at 0x%08x\n",
-                obj->size, obj_priv->gtt_offset);
- #endif
        ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->pages,
                                               obj->size >> PAGE_SHIFT,
-                                              obj_priv->gtt_offset,
+                                              obj_priv->gtt_space->start,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_put_pages(obj);
  
                goto search_free;
        }
-       atomic_inc(&dev->gtt_count);
-       atomic_add(obj->size, &dev->gtt_memory);
  
        /* keep track of bounds object by adding it to the inactive list */
-       list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+       list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       i915_gem_info_add_gtt(dev_priv, obj->size);
  
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
        BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  
+       obj_priv->gtt_offset = obj_priv->gtt_space->start;
        trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
  
        return 0;
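
Binding follows a search-or-evict loop: look for a free GTT hole, and on failure evict something and jump back to search_free. Reduced to its shape with stand-in helpers (the driver uses drm_mm_search_free()/drm_mm_get_block() and i915_gem_evict_something()):

    #include <stddef.h>
    #include <stdio.h>

    static int holes;   /* pretend the aperture starts full */

    static void *search_free(size_t size)
    {
        (void)size;
        return holes-- > 0 ? (void *)&holes : NULL;
    }

    static int evict_something(size_t size)
    {
        (void)size;
        holes = 1;      /* eviction opened up one hole */
        return 0;
    }

    static void *bind_to_gtt(size_t size)
    {
        for (;;) {
            void *node = search_free(size);
            if (node)
                return node;
            if (evict_something(size))
                return NULL;    /* nothing evictable: out of memory */
        }
    }

    int main(void)
    {
        printf("bound: %p\n", bind_to_gtt(4096));
        return 0;
    }
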
@@@ -2603,25 -2753,30 +2753,30 @@@ i915_gem_clflush_object(struct drm_gem_
  
  /** Flushes any GPU write domain for the object if it's dirty. */
  static int
- i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                      bool pipelined)
  {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
  
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
-       i915_gem_flush(dev, 0, obj->write_domain);
-       if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
-               return -ENOMEM;
+       i915_gem_flush_ring(dev, NULL,
+                           to_intel_bo(obj)->ring,
+                           0, obj->write_domain);
+       BUG_ON(obj->write_domain);
  
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
-       return 0;
+       if (pipelined)
+               return 0;
+       return i915_gem_object_wait_rendering(obj, true);
  }
  
  /** Flushes the GTT write domain for the object if it's dirty. */
@@@ -2665,26 -2820,6 +2820,6 @@@ i915_gem_object_flush_cpu_write_domain(
                                            old_write_domain);
  }
  
- int
- i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
- {
-       int ret = 0;
-       switch (obj->write_domain) {
-       case I915_GEM_DOMAIN_GTT:
-               i915_gem_object_flush_gtt_write_domain(obj);
-               break;
-       case I915_GEM_DOMAIN_CPU:
-               i915_gem_object_flush_cpu_write_domain(obj);
-               break;
-       default:
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               break;
-       }
-       return ret;
- }
  /**
   * Moves a single object to the GTT read, and possibly write domain.
   *
@@@ -2702,32 -2837,28 +2837,28 @@@ i915_gem_object_set_to_gtt_domain(struc
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
  
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
  
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
-       if (ret != 0)
-               return ret;
+       i915_gem_object_flush_cpu_write_domain(obj);
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
  
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
  
-       /* If we're writing through the GTT domain, then CPU and GPU caches
-        * will need to be invalidated at next use.
-        */
-       if (write)
-               obj->read_domains &= I915_GEM_DOMAIN_GTT;
-       i915_gem_object_flush_cpu_write_domain(obj);
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
+               obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }
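
The change from '&=' to a plain assignment above encodes the rule that a GTT write makes GTT the object's *only* read domain, since any other cached copy is now stale. A toy model (domain bit values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define DOM_CPU 0x1u
    #define DOM_GTT 0x40u

    struct object { uint32_t read_domains, write_domain; };

    static void set_to_gtt_domain(struct object *o, int write)
    {
        o->read_domains |= DOM_GTT;
        if (write) {
            o->read_domains = DOM_GTT;   /* writes invalidate other copies */
            o->write_domain = DOM_GTT;
        }
    }

    int main(void)
    {
        struct object o = { .read_domains = DOM_CPU };
        set_to_gtt_domain(&o, 1);
        assert(o.read_domains == DOM_GTT && o.write_domain == DOM_GTT);
        return 0;
    }
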
   * wait, as in the modesetting process we're not supposed to be interrupted.
   */
  int
- i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+                                    bool pipelined)
  {
-       struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       uint32_t old_write_domain, old_read_domains;
+       uint32_t old_read_domains;
        int ret;
  
        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
  
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, true);
        if (ret)
                return ret;
  
-       /* Wait on any GPU rendering and flushing to occur. */
-       if (obj_priv->active) {
- #if WATCH_BUF
-               DRM_INFO("%s: object %p wait for seqno %08x\n",
-                         __func__, obj, obj_priv->last_rendering_seqno);
- #endif
-               ret = i915_do_wait_request(dev,
-                               obj_priv->last_rendering_seqno,
-                               0,
-                               obj_priv->ring);
-               if (ret != 0)
+       /* Currently, we are always called from a non-interruptible context. */
+       if (!pipelined) {
+               ret = i915_gem_object_wait_rendering(obj, false);
+               if (ret)
                        return ret;
        }
  
        i915_gem_object_flush_cpu_write_domain(obj);
  
-       old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
-       /* It should now be out of any other write domains, and we can update
-        * the domain values for our changes.
-        */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->read_domains = I915_GEM_DOMAIN_GTT;
-       obj->write_domain = I915_GEM_DOMAIN_GTT;
-       obj_priv->dirty = 1;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
  
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
-                                           old_write_domain);
+                                           obj->write_domain);
  
        return 0;
  }
@@@ -2805,12 -2921,7 +2921,7 @@@ i915_gem_object_set_to_cpu_domain(struc
        uint32_t old_write_domain, old_read_domains;
        int ret;
  
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
  
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
  
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
  
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains &= I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
  
   *            drm_agp_chipset_flush
   */
  static void
- i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+                                 struct intel_ring_buffer *ring)
  {
        struct drm_device               *dev = obj->dev;
-       drm_i915_private_t              *dev_priv = dev->dev_private;
+       struct drm_i915_private         *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
        uint32_t                        old_read_domains;
  
-       BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
-       BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
        intel_mark_busy(dev, obj);
  
- #if WATCH_BUF
-       DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-                __func__, obj,
-                obj->read_domains, obj->pending_read_domains,
-                obj->write_domain, obj->pending_write_domain);
- #endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
- #if WATCH_BUF
-               DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
-                        __func__, flush_domains, invalidate_domains);
- #endif
+       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
-       }
  
        old_read_domains = obj->read_domains;
  
        if (obj->pending_write_domain == 0)
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;
  
-       if (flush_domains & I915_GEM_GPU_DOMAINS) {
-               if (obj_priv->ring == &dev_priv->render_ring)
-                       dev_priv->flush_rings |= FLUSH_RENDER_RING;
-               else if (obj_priv->ring == &dev_priv->bsd_ring)
-                       dev_priv->flush_rings |= FLUSH_BSD_RING;
-       }
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
- #if WATCH_BUF
-       DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
-                __func__,
-                obj->read_domains, obj->write_domain,
-                dev->invalidate_domains, dev->flush_domains);
- #endif
+       if (flush_domains & I915_GEM_GPU_DOMAINS)
+               dev_priv->mm.flush_rings |= obj_priv->ring->id;
+       if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+               dev_priv->mm.flush_rings |= ring->id;
  
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@@ -3106,12 -3201,7 +3201,7 @@@ i915_gem_object_set_cpu_read_domain_ran
        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
  
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
        i915_gem_object_flush_gtt_write_domain(obj);
  /**
   * Pin an object to the GTT and evaluate the relocations landing in it.
   */
  static int
- i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-                                struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object2 *entry,
-                                struct drm_i915_gem_relocation_entry *relocs)
+ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+                            struct drm_file *file_priv,
+                            struct drm_i915_gem_exec_object2 *entry)
  {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int i, ret;
-       void __iomem *reloc_page;
-       bool need_fence;
-       need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                    obj_priv->tiling_mode != I915_TILING_NONE;
-       /* Check fence reg constraints and rebind if necessary */
-       if (need_fence &&
-           !i915_gem_object_fence_offset_ok(obj,
-                                            obj_priv->tiling_mode)) {
-               ret = i915_gem_object_unbind(obj);
-               if (ret)
-                       return ret;
-       }
+       struct drm_i915_gem_relocation_entry __user *user_relocs;
+       struct drm_gem_object *target_obj = NULL;
+       uint32_t target_handle = 0;
+       int i, ret = 0;
  
-       /* Choose the GTT offset for our buffer and put it there. */
-       ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-       if (ret)
-               return ret;
+       user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+       for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_i915_gem_relocation_entry reloc;
+               uint32_t target_offset;
  
-       /*
-        * Pre-965 chips need a fence register set up in order to
-        * properly handle blits to/from tiled surfaces.
-        */
-       if (need_fence) {
-               ret = i915_gem_object_get_fence_reg(obj);
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
+               if (__copy_from_user_inatomic(&reloc,
+                                             user_relocs+i,
+                                             sizeof(reloc))) {
+                       ret = -EFAULT;
+                       break;
                }
-       }
  
-       entry->offset = obj_priv->gtt_offset;
+               if (reloc.target_handle != target_handle) {
+                       drm_gem_object_unreference(target_obj);
  
-       /* Apply the relocations, using the GTT aperture to avoid cache
-        * flushing requirements.
-        */
-       for (i = 0; i < entry->relocation_count; i++) {
-               struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
-               struct drm_gem_object *target_obj;
-               struct drm_i915_gem_object *target_obj_priv;
-               uint32_t reloc_val, reloc_offset;
-               uint32_t __iomem *reloc_entry;
-               target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-                                                  reloc->target_handle);
-               if (target_obj == NULL) {
-                       i915_gem_object_unpin(obj);
-                       return -ENOENT;
+                       target_obj = drm_gem_object_lookup(dev, file_priv,
+                                                          reloc.target_handle);
+                       if (target_obj == NULL) {
+                               ret = -ENOENT;
+                               break;
+                       }
+                       target_handle = reloc.target_handle;
                }
-               target_obj_priv = to_intel_bo(target_obj);
+               target_offset = to_intel_bo(target_obj)->gtt_offset;
  
  #if WATCH_RELOC
                DRM_INFO("%s: obj %p offset %08x target %d "
                         "read %08x write %08x gtt %08x "
                         "presumed %08x delta %08x\n",
                         __func__,
                         obj,
-                        (int) reloc->offset,
-                        (int) reloc->target_handle,
-                        (int) reloc->read_domains,
-                        (int) reloc->write_domain,
-                        (int) target_obj_priv->gtt_offset,
-                        (int) reloc->presumed_offset,
-                        reloc->delta);
+                        (int) reloc.offset,
+                        (int) reloc.target_handle,
+                        (int) reloc.read_domains,
+                        (int) reloc.write_domain,
+                        (int) target_offset,
+                        (int) reloc.presumed_offset,
+                        reloc.delta);
  #endif
  
                /* The target buffer should have appeared before us in the
                 * exec_object list, so it should have a GTT space bound by now.
                 */
-               if (target_obj_priv->gtt_space == NULL) {
+               if (target_offset == 0) {
                        DRM_ERROR("No GTT space found for object %d\n",
-                                 reloc->target_handle);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 reloc.target_handle);
+                       ret = -EINVAL;
+                       break;
                }
  
                /* Validate that the target is in a valid r/w GPU domain */
-               if (reloc->write_domain & (reloc->write_domain - 1)) {
+               if (reloc.write_domain & (reloc.write_domain - 1)) {
                        DRM_ERROR("reloc with multiple write domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->read_domains,
-                                 reloc->write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.read_domains,
+                                 reloc.write_domain);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
-                   reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+               if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+                   reloc.read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->read_domains,
-                                 reloc->write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.read_domains,
+                                 reloc.write_domain);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->write_domain && target_obj->pending_write_domain &&
-                   reloc->write_domain != target_obj->pending_write_domain) {
+               if (reloc.write_domain && target_obj->pending_write_domain &&
+                   reloc.write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->write_domain,
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.write_domain,
                                  target_obj->pending_write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       break;
                }
  
-               target_obj->pending_read_domains |= reloc->read_domains;
-               target_obj->pending_write_domain |= reloc->write_domain;
+               target_obj->pending_read_domains |= reloc.read_domains;
+               target_obj->pending_write_domain |= reloc.write_domain;
  
                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
-               if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
-                       drm_gem_object_unreference(target_obj);
+               if (target_offset == reloc.presumed_offset)
                        continue;
-               }
  
                /* Check that the relocation address is valid... */
-               if (reloc->offset > obj->size - 4) {
+               if (reloc.offset > obj->base.size - 4) {
                        DRM_ERROR("Relocation beyond object bounds: "
                                  "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset, (int) obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset, (int) obj->base.size);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->offset & 3) {
+               if (reloc.offset & 3) {
                        DRM_ERROR("Relocation not 4-byte aligned: "
                                  "obj %p target %d offset %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset);
+                       ret = -EINVAL;
+                       break;
                }
  
                /* and points to somewhere within the target object. */
-               if (reloc->delta >= target_obj->size) {
+               if (reloc.delta >= target_obj->size) {
                        DRM_ERROR("Relocation beyond target object bounds: "
                                  "obj %p target %d delta %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->delta, (int) target_obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.delta, (int) target_obj->size);
+                       ret = -EINVAL;
+                       break;
                }
  
-               /* Map the page containing the relocation we're going to
-                * perform.
-                */
-               reloc_offset = obj_priv->gtt_offset + reloc->offset;
-               reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                     (reloc_offset &
-                                                      ~(PAGE_SIZE - 1)));
-               reloc_entry = (uint32_t __iomem *)(reloc_page +
-                                                  (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
- #if WATCH_BUF
-               DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc->offset,
-                         readl(reloc_entry), reloc_val);
- #endif
-               writel(reloc_val, reloc_entry);
-               io_mapping_unmap_atomic(reloc_page);
-               /* The updated presumed offset for this entry will be
-                * copied back out to the user.
-                */
-               reloc->presumed_offset = target_obj_priv->gtt_offset;
-               drm_gem_object_unreference(target_obj);
-       }
- #if WATCH_BUF
-       if (0)
-               i915_gem_dump_object(obj, 128, __func__, ~0);
- #endif
-       return 0;
- }
- /* Throttle our rendering by waiting until the ring has completed our requests
-  * emitted over 20 msec ago.
-  *
-  * Note that if we were to use the current jiffies each time around the loop,
-  * we wouldn't escape the function with any frames outstanding if the time to
-  * render a frame was over 20ms.
-  *
-  * This should get us reasonable parallelism between CPU and GPU but also
-  * relatively low latency when blocking on a particular request to finish.
-  */
- static int
- i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
- {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
-       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+               reloc.delta += target_offset;
+               if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+                       uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+                       char *vaddr;
  
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
 -                      vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
++                      vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
+                       *(uint32_t *)(vaddr + page_offset) = reloc.delta;
 -                      kunmap_atomic(vaddr, KM_USER0);
++                      kunmap_atomic(vaddr);
+               } else {
+                       uint32_t __iomem *reloc_entry;
+                       void __iomem *reloc_page;
  
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
+                       ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+                       if (ret)
+                               break;
  
-               if (time_after_eq(request->emitted_jiffies, recent_enough))
-                       break;
+                       /* Map the page containing the relocation we're going to perform.  */
+                       reloc.offset += obj->gtt_offset;
+                       reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                                            reloc.offset & PAGE_MASK,
 -                                                            KM_USER0);
++                                                            reloc.offset & PAGE_MASK);
+                       reloc_entry = (uint32_t __iomem *)
+                               (reloc_page + (reloc.offset & ~PAGE_MASK));
+                       iowrite32(reloc.delta, reloc_entry);
 -                      io_mapping_unmap_atomic(reloc_page, KM_USER0);
++                      io_mapping_unmap_atomic(reloc_page);
+               }
  
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               /* and update the user's relocation entry */
+               reloc.presumed_offset = target_offset;
+               if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+                                             &reloc.presumed_offset,
+                                             sizeof(reloc.presumed_offset))) {
+                       ret = -EFAULT;
+                       break;
+               }
        }
-       mutex_unlock(&dev->struct_mutex);
  
+       drm_gem_object_unreference(target_obj);
        return ret;
  }
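
The relocation checks above reduce to three predicates on each entry: the patched dword must lie entirely within the source object, be 4-byte aligned, and point inside the target object. Distilled into a user-space helper (field names loosely follow the ioctl struct; assumes obj_size >= 4):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct reloc { uint64_t offset; uint64_t delta; };

    static bool reloc_ok(const struct reloc *r,
                         size_t obj_size, size_t target_size)
    {
        if (r->offset > obj_size - 4)
            return false;          /* beyond object bounds */
        if (r->offset & 3)
            return false;          /* not 4-byte aligned */
        if (r->delta >= target_size)
            return false;          /* beyond target bounds */
        return true;
    }

    int main(void)
    {
        struct reloc r = { .offset = 16, .delta = 0 };
        assert(reloc_ok(&r, 4096, 4096));
        r.offset = 4094;           /* dword would straddle the end */
        assert(!reloc_ok(&r, 4096, 4096));
        return 0;
    }
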
  
  static int
- i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
-                             uint32_t buffer_count,
-                             struct drm_i915_gem_relocation_entry **relocs)
+ i915_gem_execbuffer_pin(struct drm_device *dev,
+                       struct drm_file *file,
+                       struct drm_gem_object **object_list,
+                       struct drm_i915_gem_exec_object2 *exec_list,
+                       int count)
  {
-       uint32_t reloc_count = 0, reloc_index = 0, i;
-       int ret;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret, i, retry;
  
-       *relocs = NULL;
-       for (i = 0; i < buffer_count; i++) {
-               if (reloc_count + exec_list[i].relocation_count < reloc_count)
-                       return -EINVAL;
-               reloc_count += exec_list[i].relocation_count;
-       }
+       /* attempt to pin all of the buffers into the GTT */
+       for (retry = 0; retry < 2; retry++) {
+               ret = 0;
+               for (i = 0; i < count; i++) {
+                       struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+                       struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                       bool need_fence =
+                               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                               obj->tiling_mode != I915_TILING_NONE;
+                       /* Check fence reg constraints and rebind if necessary */
+                       if (need_fence &&
+                           !i915_gem_object_fence_offset_ok(&obj->base,
+                                                            obj->tiling_mode)) {
+                               ret = i915_gem_object_unbind(&obj->base);
+                               if (ret)
+                                       break;
+                       }
  
-       *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-       if (*relocs == NULL) {
-               DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
-               return -ENOMEM;
-       }
+                       ret = i915_gem_object_pin(&obj->base, entry->alignment);
+                       if (ret)
+                               break;
  
-       for (i = 0; i < buffer_count; i++) {
-               struct drm_i915_gem_relocation_entry __user *user_relocs;
+                       /*
+                        * Pre-965 chips need a fence register set up in order
+                        * to properly handle blits to/from tiled surfaces.
+                        */
+                       if (need_fence) {
+                               ret = i915_gem_object_get_fence_reg(&obj->base, true);
+                               if (ret) {
+                                       i915_gem_object_unpin(&obj->base);
+                                       break;
+                               }
  
-               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+                               dev_priv->fence_regs[obj->fence_reg].gpu = true;
+                       }
  
-               ret = copy_from_user(&(*relocs)[reloc_index],
-                                    user_relocs,
-                                    exec_list[i].relocation_count *
-                                    sizeof(**relocs));
-               if (ret != 0) {
-                       drm_free_large(*relocs);
-                       *relocs = NULL;
-                       return -EFAULT;
+                       entry->offset = obj->gtt_offset;
                }
  
-               reloc_index += exec_list[i].relocation_count;
+               while (i--)
+                       i915_gem_object_unpin(object_list[i]);
+               if (ret == 0)
+                       break;
+               if (ret != -ENOSPC || retry)
+                       return ret;
+               ret = i915_gem_evict_everything(dev);
+               if (ret)
+                       return ret;
        }
  
        return 0;
  }
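
The pinning loop makes at most two passes: on the first -ENOSPC it drops whatever it managed to pin, evicts the entire aperture, and retries once; any other error, or a second -ENOSPC, is final. The retry shape, as a sketch with stand-in helpers that fail once and then succeed:

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* stand-ins: first pin attempt fails with -ENOSPC, second succeeds */
    static int pin_all(void)          { return attempts++ ? 0 : -ENOSPC; }
    static void unpin_all(void)       { }
    static int evict_everything(void) { return 0; }

    static int pin_with_retry(void)
    {
        for (int retry = 0; retry < 2; retry++) {
            int ret = pin_all();
            if (ret == 0)
                return 0;
            unpin_all();                 /* drop partial pins */
            if (ret != -ENOSPC || retry)
                return ret;              /* hard error or second miss */
            ret = evict_everything();
            if (ret)
                return ret;
        }
        return 0;
    }

    int main(void)
    {
        printf("pin: %d\n", pin_with_retry());
        return 0;
    }
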
  
+ /* Throttle our rendering by waiting until the ring has completed our requests
+  * emitted over 20 msec ago.
+  *
+  * Note that if we were to use the current jiffies each time around the loop,
+  * we wouldn't escape the function with any frames outstanding if the time to
+  * render a frame was over 20ms.
+  *
+  * This should get us reasonable parallelism between CPU and GPU but also
+  * relatively low latency when blocking on a particular request to finish.
+  */
  static int
- i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
-                           uint32_t buffer_count,
-                           struct drm_i915_gem_relocation_entry *relocs)
+ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  {
-       uint32_t reloc_count = 0, i;
-       int ret = 0;
-       if (relocs == NULL)
-           return 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       struct drm_i915_gem_request *request;
+       struct intel_ring_buffer *ring = NULL;
+       u32 seqno = 0;
+       int ret;
  
-       for (i = 0; i < buffer_count; i++) {
-               struct drm_i915_gem_relocation_entry __user *user_relocs;
-               int unwritten;
+       spin_lock(&file_priv->mm.lock);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+               if (time_after_eq(request->emitted_jiffies, recent_enough))
+                       break;
  
-               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+               ring = request->ring;
+               seqno = request->seqno;
+       }
+       spin_unlock(&file_priv->mm.lock);
  
-               unwritten = copy_to_user(user_relocs,
-                                        &relocs[reloc_count],
-                                        exec_list[i].relocation_count *
-                                        sizeof(*relocs));
+       if (seqno == 0)
+               return 0;
  
-               if (unwritten) {
-                       ret = -EFAULT;
-                       goto err;
-               }
+       ret = 0;
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+               /* And wait for the seqno passing without holding any locks and
+                * causing extra latency for others. This is safe as the irq
+                * generation is designed to be run atomically and so is
+                * lockless.
+                */
+               ring->user_irq_get(dev, ring);
+               ret = wait_event_interruptible(ring->irq_queue,
+                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              || atomic_read(&dev_priv->mm.wedged));
+               ring->user_irq_put(dev, ring);
  
-               reloc_count += exec_list[i].relocation_count;
+               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                       ret = -EIO;
        }
  
- err:
-       drm_free_large(relocs);
+       if (ret == 0)
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
        return ret;
  }
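
The request-picking step above walks the client's list oldest-first and remembers the newest request that is already at least 20 ms old; that is the one waited upon. A user-space sketch of just that selection (timespec arithmetic replaces jiffies):

    #include <stdint.h>
    #include <time.h>

    struct request { struct timespec emitted; uint32_t seqno; };

    static uint32_t pick_throttle_seqno(const struct request *reqs, int n,
                                        struct timespec now)
    {
        uint32_t seqno = 0;

        for (int i = 0; i < n; i++) {    /* oldest request first */
            long ms = (now.tv_sec - reqs[i].emitted.tv_sec) * 1000 +
                      (now.tv_nsec - reqs[i].emitted.tv_nsec) / 1000000;
            if (ms < 20)
                break;                   /* too recent: stop scanning */
            seqno = reqs[i].seqno;       /* newest one at least 20ms old */
        }
        return seqno;                    /* 0: nothing old enough */
    }

    int main(void)
    {
        struct timespec now = { .tv_sec = 100, .tv_nsec = 0 };
        struct request reqs[2] = {
            { .emitted = { .tv_sec = 99 },  .seqno = 1 },  /* 1000ms old */
            { .emitted = { .tv_sec = 100 }, .seqno = 2 },  /* brand new  */
        };
        return pick_throttle_seqno(reqs, 2, now) != 1;
    }
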
  
  static int
- i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
-                          uint64_t exec_offset)
+ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
+                         uint64_t exec_offset)
  {
        uint32_t exec_start, exec_len;
  
  }
  
  static int
- i915_gem_wait_for_pending_flip(struct drm_device *dev,
-                              struct drm_gem_object **object_list,
-                              int count)
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+                  int count)
  {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       DEFINE_WAIT(wait);
-       int i, ret = 0;
+       int i;
  
-       for (;;) {
-               prepare_to_wait(&dev_priv->pending_flip_queue,
-                               &wait, TASK_INTERRUPTIBLE);
-               for (i = 0; i < count; i++) {
-                       obj_priv = to_intel_bo(object_list[i]);
-                       if (atomic_read(&obj_priv->pending_flip) > 0)
-                               break;
-               }
-               if (i == count)
-                       break;
+       for (i = 0; i < count; i++) {
+               char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+               size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
  
-               if (!signal_pending(current)) {
-                       mutex_unlock(&dev->struct_mutex);
-                       schedule();
-                       mutex_lock(&dev->struct_mutex);
-                       continue;
-               }
-               ret = -ERESTARTSYS;
-               break;
+               if (!access_ok(VERIFY_READ, ptr, length))
+                       return -EFAULT;
+               /* we may also need to update the presumed offsets */
+               if (!access_ok(VERIFY_WRITE, ptr, length))
+                       return -EFAULT;
+               if (fault_in_pages_readable(ptr, length))
+                       return -EFAULT;
        }
-       finish_wait(&dev_priv->pending_flip_queue, &wait);
  
-       return ret;
+       return 0;
  }
  
- int
+ static int
  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv,
+                      struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec_list)
  {
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
-       struct drm_i915_gem_relocation_entry *relocs = NULL;
-       int ret = 0, ret2, i, pinned = 0;
+       struct drm_i915_gem_request *request = NULL;
+       int ret, i, flips;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains, reloc_index;
-       int pin_tries, flips;
  
        struct intel_ring_buffer *ring = NULL;
  
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+       ret = validate_exec_list(exec_list, args->buffer_count);
+       if (ret)
+               return ret;
  #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
  #endif
-       if (args->flags & I915_EXEC_BSD) {
+       switch (args->flags & I915_EXEC_RING_MASK) {
+       case I915_EXEC_DEFAULT:
+       case I915_EXEC_RENDER:
+               ring = &dev_priv->render_ring;
+               break;
+       case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
-                       DRM_ERROR("execbuf with wrong flag\n");
+                       DRM_ERROR("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->bsd_ring;
-       } else {
-               ring = &dev_priv->render_ring;
+               break;
+       case I915_EXEC_BLT:
+               if (!HAS_BLT(dev)) {
+                       DRM_ERROR("execbuf with invalid ring (BLT)\n");
+                       return -EINVAL;
+               }
+               ring = &dev_priv->blt_ring;
+               break;
+       default:
+               DRM_ERROR("execbuf with unknown ring: %d\n",
+                         (int)(args->flags & I915_EXEC_RING_MASK));
+               return -EINVAL;
        }
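
The switch above maps the I915_EXEC_* flag to a ring and refuses, rather than silently falling back, when the hardware lacks the requested engine. A sketch of that policy (the 0x7 mask and hw_has_* feature tests are illustrative stand-ins for I915_EXEC_RING_MASK and HAS_BSD()/HAS_BLT()):

    #include <errno.h>
    #include <stdio.h>

    enum ring_id { RING_RENDER_ID, RING_BSD_ID, RING_BLT_ID };

    static int select_ring(unsigned int flags, int hw_has_bsd, int hw_has_blt)
    {
        switch (flags & 0x7) {   /* illustrative ring mask */
        case 0:                  /* default */
        case 1:                  /* render  */
            return RING_RENDER_ID;
        case 2:                  /* bsd     */
            return hw_has_bsd ? RING_BSD_ID : -EINVAL;
        case 3:                  /* blt     */
            return hw_has_blt ? RING_BLT_ID : -EINVAL;
        default:
            return -EINVAL;      /* unknown ring: reject, don't guess */
        }
    }

    int main(void)
    {
        printf("%d\n", select_ring(3, 1, 0));  /* BLT on non-BLT hw: -EINVAL */
        return 0;
    }
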
  
        if (args->buffer_count < 1) {
                }
        }
  
-       ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
-                                           &relocs);
-       if (ret != 0)
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL) {
+               ret = -ENOMEM;
                goto pre_mutex_err;
+       }
  
-       mutex_lock(&dev->struct_mutex);
-       i915_verify_inactive(dev, __FILE__, __LINE__);
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EIO;
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
                goto pre_mutex_err;
-       }
  
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
        }
  
        /* Look up object handles */
-       flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
-               object_list[i] = drm_gem_object_lookup(dev, file_priv,
+               object_list[i] = drm_gem_object_lookup(dev, file,
                                                       exec_list[i].handle);
                if (object_list[i] == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                        goto err;
                }
                obj_priv->in_execbuffer = true;
-               flips += atomic_read(&obj_priv->pending_flip);
-       }
-       if (flips > 0) {
-               ret = i915_gem_wait_for_pending_flip(dev, object_list,
-                                                    args->buffer_count);
-               if (ret)
-                       goto err;
        }
  
-       /* Pin and relocate */
-       for (pin_tries = 0; ; pin_tries++) {
-               ret = 0;
-               reloc_index = 0;
-               for (i = 0; i < args->buffer_count; i++) {
-                       object_list[i]->pending_read_domains = 0;
-                       object_list[i]->pending_write_domain = 0;
-                       ret = i915_gem_object_pin_and_relocate(object_list[i],
-                                                              file_priv,
-                                                              &exec_list[i],
-                                                              &relocs[reloc_index]);
-                       if (ret)
-                               break;
-                       pinned = i + 1;
-                       reloc_index += exec_list[i].relocation_count;
-               }
-               /* success */
-               if (ret == 0)
-                       break;
-               /* error other than GTT full, or we've already tried again */
-               if (ret != -ENOSPC || pin_tries >= 1) {
-                       if (ret != -ERESTARTSYS) {
-                               unsigned long long total_size = 0;
-                               int num_fences = 0;
-                               for (i = 0; i < args->buffer_count; i++) {
-                                       obj_priv = to_intel_bo(object_list[i]);
-                                       total_size += object_list[i]->size;
-                                       num_fences +=
-                                               exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
-                                               obj_priv->tiling_mode != I915_TILING_NONE;
-                               }
-                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
-                                         pinned+1, args->buffer_count,
-                                         total_size, num_fences,
-                                         ret);
-                               DRM_ERROR("%d objects [%d pinned], "
-                                         "%d object bytes [%d pinned], "
-                                         "%d/%d gtt bytes\n",
-                                         atomic_read(&dev->object_count),
-                                         atomic_read(&dev->pin_count),
-                                         atomic_read(&dev->object_memory),
-                                         atomic_read(&dev->pin_memory),
-                                         atomic_read(&dev->gtt_memory),
-                                         dev->gtt_total);
-                       }
-                       goto err;
-               }
-               /* unpin all of our buffers */
-               for (i = 0; i < pinned; i++)
-                       i915_gem_object_unpin(object_list[i]);
-               pinned = 0;
+       /* Move the objects en-masse into the GTT, evicting if necessary. */
+       ret = i915_gem_execbuffer_pin(dev, file,
+                                     object_list, exec_list,
+                                     args->buffer_count);
+       if (ret)
+               goto err;
  
-               /* evict everyone we can from the aperture */
-               ret = i915_gem_evict_everything(dev);
-               if (ret && ret != -ENOSPC)
+       /* The objects are in their final locations, apply the relocations. */
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               obj->base.pending_read_domains = 0;
+               obj->base.pending_write_domain = 0;
+               ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+               if (ret)
                        goto err;
        }
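/*
 * Sketch (illustrative, not from the patch): why pinning must precede
 * relocation. A relocation patches a word inside one buffer with the final
 * GTT address of another object, so every address has to be fixed before
 * any write happens. All names below are hypothetical placeholders.
 */
struct example_reloc {
        unsigned long offset;           /* write location inside the batch */
        unsigned long target_gtt;       /* final GTT address of the target */
        unsigned long delta;            /* bias supplied by userspace */
};

static void example_apply_reloc(char *batch_map, const struct example_reloc *r)
{
        /* valid only while the target object stays pinned at target_gtt */
        *(unsigned int *)(batch_map + r->offset) =
                (unsigned int)(r->target_gtt + r->delta);
}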
  
        }
        batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  
-       /* Sanity check the batch buffer, prior to moving objects */
-       exec_offset = exec_list[args->buffer_count - 1].offset;
-       ret = i915_gem_check_execbuffer (args, exec_offset);
+       /* Sanity check the batch buffer */
+       exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+       ret = i915_gem_check_execbuffer(args, exec_offset);
        if (ret != 0) {
                DRM_ERROR("execbuf with invalid offset/length\n");
                goto err;
        }
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
        /* Zero the global flush/invalidate flags. These
         * will be modified as new domains are computed
         * for each object
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
-       dev_priv->flush_rings = 0;
+       dev_priv->mm.flush_rings = 0;
  
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
  
                /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj);
+               i915_gem_object_set_to_gpu_domain(obj, ring);
        }
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
        if (dev->invalidate_domains | dev->flush_domains) {
  #if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         dev->invalidate_domains,
                         dev->flush_domains);
  #endif
-               i915_gem_flush(dev,
+               i915_gem_flush(dev, file,
                               dev->invalidate_domains,
-                              dev->flush_domains);
-               if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-                       (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains,
-                                              &dev_priv->render_ring);
-               if (dev_priv->flush_rings & FLUSH_BSD_RING)
-                       (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains,
-                                              &dev_priv->bsd_ring);
+                              dev->flush_domains,
+                              dev_priv->mm.flush_rings);
        }
  
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                uint32_t old_write_domain = obj->write_domain;
                obj->write_domain = obj->pending_write_domain;
-               if (obj->write_domain)
-                       list_move_tail(&obj_priv->gpu_write_list,
-                                      &dev_priv->mm.gpu_write_list);
-               else
-                       list_del_init(&obj_priv->gpu_write_list);
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
        }
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
  #if WATCH_COHERENCY
        for (i = 0; i < args->buffer_count; i++) {
                i915_gem_object_check_coherency(object_list[i],
                              ~0);
  #endif
  
+       /* Check for any pending flips. As we only maintain a flip queue depth
+        * of 1, we can simply insert a WAIT for the next display flip prior
+        * to executing the batch and avoid stalling the CPU.
+        */
+       flips = 0;
+       for (i = 0; i < args->buffer_count; i++) {
+               if (object_list[i]->write_domain)
+                       flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+       }
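+       /* 'flips' is now a bitmask of planes with a flip pending against a
+        * buffer this batch is about to write */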
+       if (flips) {
+               int plane, flip_mask;
+               for (plane = 0; flips >> plane; plane++) {
+                       if (((flips >> plane) & 1) == 0)
+                               continue;
+                       if (plane)
+                               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+                       else
+                               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+                       intel_ring_begin(dev, ring, 2);
+                       intel_ring_emit(dev, ring,
+                                       MI_WAIT_FOR_EVENT | flip_mask);
+                       intel_ring_emit(dev, ring, MI_NOOP);
+                       intel_ring_advance(dev, ring);
+               }
+       }
        /* Exec the batchbuffer */
        ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-                       cliprects, exec_offset);
+                                           cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev, ring);
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       i915_retire_commands(dev, ring);
  
-       /*
-        * Get a seqno representing the execution of the current buffer,
-        * which we can wait on.  We would like to mitigate these interrupts,
-        * likely by only creating seqnos occasionally (so that we have
-        * *some* interrupts representing completion of buffers that we can
-        * wait on when trying to clear up gtt space).
-        */
-       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
-       BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               obj_priv = to_intel_bo(obj);
  
-               i915_gem_object_move_to_active(obj, seqno, ring);
- #if WATCH_LRU
-               DRM_INFO("%s: move to exec list %p\n", __func__, obj);
- #endif
+               i915_gem_object_move_to_active(obj, ring);
+               if (obj->write_domain)
+                       list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+                                      &ring->gpu_write_list);
        }
- #if WATCH_LRU
-       i915_dump_lru(dev, __func__);
- #endif
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       i915_add_request(dev, file, request, ring);
+       request = NULL;
  
  err:
-       for (i = 0; i < pinned; i++)
-               i915_gem_object_unpin(object_list[i]);
        for (i = 0; i < args->buffer_count; i++) {
                if (object_list[i]) {
                        obj_priv = to_intel_bo(object_list[i]);
        mutex_unlock(&dev->struct_mutex);
  
  pre_mutex_err:
-       /* Copy the updated relocations out regardless of current error
-        * state.  Failure to update the relocs would mean that the next
-        * time userland calls execbuf, it would do so with presumed offset
-        * state that didn't match the actual object state.
-        */
-       ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
-                                          relocs);
-       if (ret2 != 0) {
-               DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-               if (ret == 0)
-                       ret = ret2;
-       }
        drm_free_large(object_list);
        kfree(cliprects);
+       kfree(request);
  
        return ret;
  }
@@@ -3941,7 -3935,7 +3934,7 @@@ i915_gem_execbuffer(struct drm_device *
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
-               if (!IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
  i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
  
        BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
  
        if (obj_priv->gtt_space != NULL) {
                if (alignment == 0)
         * remove it from the inactive list
         */
        if (obj_priv->pin_count == 1) {
-               atomic_inc(&dev->pin_count);
-               atomic_add(obj->size, &dev->pin_memory);
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_del_init(&obj_priv->list);
+               i915_gem_info_add_pin(dev_priv, obj->size);
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->mm_list,
+                                      &dev_priv->mm.pinned_list);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
  
+       WARN_ON(i915_verify_lists(dev));
        return 0;
  }
  
@@@ -4089,7 -4082,7 +4081,7 @@@ i915_gem_object_unpin(struct drm_gem_ob
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
        obj_priv->pin_count--;
        BUG_ON(obj_priv->pin_count < 0);
        BUG_ON(obj_priv->gtt_space == NULL);
         * the inactive list
         */
        if (obj_priv->pin_count == 0) {
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_move_tail(&obj_priv->list,
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->mm_list,
                                       &dev_priv->mm.inactive_list);
-               atomic_dec(&dev->pin_count);
-               atomic_sub(obj->size, &dev->pin_memory);
+               i915_gem_info_remove_pin(dev_priv, obj->size);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
  }
  
  int
@@@ -4118,41 -4109,36 +4108,36 @@@ i915_gem_pin_ioctl(struct drm_device *d
        struct drm_i915_gem_object *obj_priv;
        int ret;
  
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
        obj_priv = to_intel_bo(obj);
  
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
  
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
  
        obj_priv->user_pin_count++;
        obj_priv->pin_filp = file_priv;
        if (obj_priv->user_pin_count == 1) {
                ret = i915_gem_object_pin(obj, args->alignment);
-               if (ret != 0) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
  
        /* XXX - flush the CPU caches for pinned objects
         */
        i915_gem_object_flush_cpu_write_domain(obj);
        args->offset = obj_priv->gtt_offset;
+ out:
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
  }
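/*
 * Sketch (illustrative, not from the patch): the single-exit ioctl shape
 * this series converges on. Every failure path jumps to a label so the
 * object reference and struct_mutex are each dropped exactly once. The
 * validation step and function name are placeholders; the locking and
 * lookup calls are the ones used above.
 */
static int example_ioctl(struct drm_device *dev, u32 handle,
                         struct drm_file *file)
{
        struct drm_gem_object *obj;
        int invalid = 0;        /* stand-in for a real argument check */
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (invalid) {
                ret = -EINVAL;
                goto out;
        }

        ret = 0;        /* ... the real work would go here ... */
out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}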
  
  int
@@@ -4173,24 -4160,24 +4159,24 @@@ i915_gem_unpin_ioctl(struct drm_device 
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
  
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
-                         args->handle);
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
        obj_priv = to_intel_bo(obj);
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        obj_priv->user_pin_count--;
        if (obj_priv->user_pin_count == 0) {
                i915_gem_object_unpin(obj);
        }
  
+ out:
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
  }
  
  int
@@@ -4210,22 -4199,24 +4198,24 @@@ i915_gem_busy_ioctl(struct drm_device *
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
-                         args->handle);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
-       mutex_lock(&dev->struct_mutex);
+       obj_priv = to_intel_bo(obj);
  
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
         * become non-busy without any further actions, therefore emit any
         * necessary flushes here.
         */
-       obj_priv = to_intel_bo(obj);
        args->busy = obj_priv->active;
        if (args->busy) {
                /* Unconditionally flush objects, even when the gpu still uses this
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain) {
-                       i915_gem_flush(dev, 0, obj->write_domain);
-                       (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
-               }
+               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           obj_priv->ring,
+                                           0, obj->write_domain);
  
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
        }
  
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
  }
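/*
 * Sketch (illustrative, not from the patch): how userspace consumes the
 * busy ioctl above. Assumes a valid DRM fd and GEM handle and the standard
 * definitions from <drm/i915_drm.h>; error handling is minimal.
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_is_busy(int fd, unsigned int handle)
{
        struct drm_i915_gem_busy busy;
        int ret;

        memset(&busy, 0, sizeof(busy));
        busy.handle = handle;

        do {            /* retry if interrupted by a signal */
                ret = ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

        return ret == 0 ? (busy.busy != 0) : -1;
}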
  
  int
@@@ -4267,6 -4259,7 +4258,7 @@@ i915_gem_madvise_ioctl(struct drm_devic
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
  
        switch (args->madv) {
        case I915_MADV_DONTNEED:
            return -EINVAL;
        }
  
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
-                         args->handle);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
-       mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
  
        if (obj_priv->pin_count) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
  
        if (obj_priv->madv != __I915_MADV_PURGED)
  
        args->retained = obj_priv->madv != __I915_MADV_PURGED;
  
+ out:
        drm_gem_object_unreference(obj);
+ unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
  }
  
  struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
                                              size_t size)
  {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
  
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                return NULL;
        }
  
+       i915_gem_info_add_obj(dev_priv, size);
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  
        obj->agp_type = AGP_USER_MEMORY;
        obj->base.driver_private = NULL;
        obj->fence_reg = I915_FENCE_REG_NONE;
-       INIT_LIST_HEAD(&obj->list);
+       INIT_LIST_HEAD(&obj->mm_list);
+       INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
  
-       trace_i915_gem_object_create(&obj->base);
        return &obj->base;
  }
  
@@@ -4355,7 -4349,7 +4348,7 @@@ static void i915_gem_free_object_tail(s
  
        ret = i915_gem_object_unbind(obj);
        if (ret == -ERESTARTSYS) {
-               list_move(&obj_priv->list,
+               list_move(&obj_priv->mm_list,
                          &dev_priv->mm.deferred_free_list);
                return;
        }
                i915_gem_free_mmap_offset(obj);
  
        drm_gem_object_release(obj);
+       i915_gem_info_remove_obj(dev_priv, obj->size);
  
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
@@@ -4394,10 -4389,7 +4388,7 @@@ i915_gem_idle(struct drm_device *dev
  
        mutex_lock(&dev->struct_mutex);
  
-       if (dev_priv->mm.suspended ||
-                       (dev_priv->render_ring.gem_object == NULL) ||
-                       (HAS_BSD(dev) &&
-                        dev_priv->bsd_ring.gem_object == NULL)) {
+       if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
         * And not confound mm.suspended!
         */
        dev_priv->mm.suspended = 1;
-       del_timer(&dev_priv->hangcheck_timer);
+       del_timer_sync(&dev_priv->hangcheck_timer);
  
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
@@@ -4502,36 -4494,34 +4493,34 @@@ i915_gem_init_ringbuffer(struct drm_dev
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
  
-       dev_priv->render_ring = render_ring;
-       if (!I915_NEED_GFX_HWS(dev)) {
-               dev_priv->render_ring.status_page.page_addr
-                       = dev_priv->status_page_dmah->vaddr;
-               memset(dev_priv->render_ring.status_page.page_addr,
-                               0, PAGE_SIZE);
-       }
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
  
-       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       ret = intel_init_render_ring_buffer(dev);
        if (ret)
                goto cleanup_pipe_control;
  
        if (HAS_BSD(dev)) {
-               dev_priv->bsd_ring = bsd_ring;
-               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }
  
+       if (HAS_BLT(dev)) {
+               ret = intel_init_blt_ring_buffer(dev);
+               if (ret)
+                       goto cleanup_bsd_ring;
+       }
+
        dev_priv->next_seqno = 1;
  
        return 0;
  
+ cleanup_bsd_ring:
+       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
  cleanup_render_ring:
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
  cleanup_pipe_control:
@@@ -4546,8 -4536,8 +4535,8 @@@ i915_gem_cleanup_ringbuffer(struct drm_
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
        if (HAS_PIPE_CONTROL(dev))
                i915_gem_cleanup_pipe_control(dev);
  }
@@@ -4576,15 -4566,15 +4565,15 @@@ i915_gem_entervt_ioctl(struct drm_devic
                return ret;
        }
  
-       spin_lock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+       BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+       BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+       BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
        mutex_unlock(&dev->struct_mutex);
  
        ret = drm_irq_install(dev);
@@@ -4626,28 -4616,34 +4615,34 @@@ i915_gem_lastclose(struct drm_device *d
                DRM_ERROR("failed to idle hardware: %d\n", ret);
  }
  
+ static void
+ init_ring_lists(struct intel_ring_buffer *ring)
+ {
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+ }
+
  void
  i915_gem_load(struct drm_device *dev)
  {
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
-       spin_lock_init(&dev_priv->mm.active_list_lock);
+       INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-       INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-       if (HAS_BSD(dev)) {
-               INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-               INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-       }
+       init_ring_lists(&dev_priv->render_ring);
+       init_ring_lists(&dev_priv->bsd_ring);
+       init_ring_lists(&dev_priv->blt_ring);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       init_completion(&dev_priv->error_completion);
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;
  
-       if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+       if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
  
        /* Initialize fence registers to zero */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
-       } else {
-               for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
+       case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
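+               /* deliberate fall-through: gens 2 and 3 share the eight
+                * low 830-style fence registers */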
+       case 2:
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
   * Create a physically contiguous memory object for this object
   * e.g. for cursor + overlay regs
   */
- int i915_gem_init_phys_object(struct drm_device *dev,
-                             int id, int size, int align)
+ static int i915_gem_init_phys_object(struct drm_device *dev,
+                                    int id, int size, int align)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@@ -4723,7 -4728,7 +4727,7 @@@ kfree_obj
        return ret;
  }
  
- void i915_gem_free_phys_object(struct drm_device *dev, int id)
+ static void i915_gem_free_phys_object(struct drm_device *dev, int id)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@@ -4771,11 -4776,11 +4775,11 @@@ void i915_gem_detach_phys_object(struc
        page_count = obj->size / PAGE_SIZE;
  
        for (i = 0; i < page_count; i++) {
 -              char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
 +              char *dst = kmap_atomic(obj_priv->pages[i]);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  
                memcpy(dst, src, PAGE_SIZE);
 -              kunmap_atomic(dst, KM_USER0);
 +              kunmap_atomic(dst);
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
@@@ -4832,11 -4837,11 +4836,11 @@@ i915_gem_attach_phys_object(struct drm_
        page_count = obj->size / PAGE_SIZE;
  
        for (i = 0; i < page_count; i++) {
 -              char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
 +              char *src = kmap_atomic(obj_priv->pages[i]);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  
                memcpy(dst, src, PAGE_SIZE);
 -              kunmap_atomic(src, KM_USER0);
 +              kunmap_atomic(src);
        }
  
        i915_gem_object_put_pages(obj);
@@@ -4868,18 -4873,25 +4872,25 @@@ i915_gem_phys_pwrite(struct drm_device 
        return 0;
  }
  
- void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
  
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list))
-               list_del_init(i915_file_priv->mm.request_list.next);
-       mutex_unlock(&dev->struct_mutex);
+       spin_lock(&file_priv->mm.lock);
+       while (!list_empty(&file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+               request = list_first_entry(&file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+               list_del(&request->client_list);
+               request->file_priv = NULL;
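+               /* the request stays on its ring's list and is freed by
+                * retirement; only the client link is severed here */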
+       }
+       spin_unlock(&file_priv->mm.lock);
  }
  
  static int
@@@ -4888,12 -4900,10 +4899,10 @@@ i915_gpu_is_active(struct drm_device *d
        drm_i915_private_t *dev_priv = dev->dev_private;
        int lists_empty;
  
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->render_ring.active_list);
-       if (HAS_BSD(dev))
-               lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+                     list_empty(&dev_priv->render_ring.active_list) &&
+                     list_empty(&dev_priv->bsd_ring.active_list) &&
+                     list_empty(&dev_priv->blt_ring.active_list);
  
        return !lists_empty;
  }
@@@ -4915,7 -4925,7 +4924,7 @@@ i915_gem_shrink(struct shrinker *shrink
                        if (mutex_trylock(&dev->struct_mutex)) {
                                list_for_each_entry(obj_priv,
                                                    &dev_priv->mm.inactive_list,
-                                                   list)
+                                                   mm_list)
                                        cnt++;
                                mutex_unlock(&dev->struct_mutex);
                        }
@@@ -4941,7 -4951,7 +4950,7 @@@ rescan
  
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
-                                        list) {
+                                        mm_list) {
                        if (i915_gem_object_is_purgeable(obj_priv)) {
                                i915_gem_object_unbind(&obj_priv->base);
                                if (--nr_to_scan <= 0)
  
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
-                                        list) {
+                                        mm_list) {
                        if (nr_to_scan > 0) {
                                i915_gem_object_unbind(&obj_priv->base);
                                nr_to_scan--;
@@@ -85,7 -85,7 +85,7 @@@ ironlake_disable_graphics_irq(drm_i915_
  }
  
  /* For display hotplug interrupt */
- void
+ static void
  ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
  {
        if ((dev_priv->irq_mask_reg & mask) != 0) {
@@@ -172,7 -172,7 +172,7 @@@ void intel_enable_asle (struct drm_devi
        else {
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
-               if (IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }
@@@ -191,12 -191,7 +191,7 @@@ static in
  i915_pipe_enabled(struct drm_device *dev, int pipe)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-       if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-               return 1;
-       return 0;
+       return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
  }
  
  /* Called from drm generic code, passed a 'crtc', which
@@@ -207,10 -202,7 +202,7 @@@ u32 i915_get_vblank_counter(struct drm_
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
-       u32 high1, high2, low, count;
-       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+       u32 high1, high2, low;
  
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                return 0;
        }
  
+       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
-               high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
-               low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-                       PIPE_FRAME_LOW_SHIFT);
-               high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
+               high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+               low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+               high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
  
-       count = (high1 << 8) | low;
-       return count;
+       high1 >>= PIPE_FRAME_HIGH_SHIFT;
+       low >>= PIPE_FRAME_LOW_SHIFT;
+       return (high1 << 8) | low;
  }
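/*
 * Sketch (illustrative, not from the patch): the stable-read idiom used by
 * i915_get_vblank_counter above, in isolation. A counter split across two
 * registers can tear between reads, so the high half is re-read until it is
 * unchanged. The accessors are hypothetical.
 */
static unsigned int read_split_counter(unsigned int (*read_high)(void),
                                       unsigned int (*read_low)(void))
{
        unsigned int high1, high2, low;

        do {
                high1 = read_high();
                low = read_low();
                high2 = read_high();
        } while (high1 != high2);       /* low half wrapped mid-read: retry */

        return (high1 << 8) | low;
}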
  
  u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@@ -260,16 -252,12 +252,12 @@@ static void i915_hotplug_work_func(stru
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_encoder *encoder;
-       if (mode_config->num_encoder) {
-               list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-                       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-       
-                       if (intel_encoder->hot_plug)
-                               (*intel_encoder->hot_plug) (intel_encoder);
-               }
-       }
+       struct intel_encoder *encoder;
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+               if (encoder->hot_plug)
+                       encoder->hot_plug(encoder);
+
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
  }
@@@ -305,13 -293,30 +293,30 @@@ static void i915_handle_rps_change(stru
        return;
  }
  
- irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ static void notify_ring(struct drm_device *dev,
+                       struct intel_ring_buffer *ring)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 seqno = ring->get_seqno(dev, ring);
+       ring->irq_gem_seqno = seqno;
+       trace_i915_gem_request_complete(dev, seqno);
+       wake_up_all(&ring->irq_queue);
+       dev_priv->hangcheck_count = 0;
+       mod_timer(&dev_priv->hangcheck_timer,
+                 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
+
+ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir;
+       u32 hotplug_mask;
        struct drm_i915_master_private *master_priv;
-       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+       u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+       if (IS_GEN6(dev))
+               bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
  
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
                goto done;
  
+       if (HAS_PCH_CPT(dev))
+               hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+       else
+               hotplug_mask = SDE_HOTPLUG_MASK;
+
        ret = IRQ_HANDLED;
  
        if (dev->primary->master) {
                                READ_BREADCRUMB(dev_priv);
        }
  
-       if (gt_iir & GT_PIPE_NOTIFY) {
-               u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
-               render_ring->irq_gem_seqno = seqno;
-               trace_i915_gem_request_complete(dev, seqno);
-               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-               dev_priv->hangcheck_count = 0;
-               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-       }
-       if (gt_iir & GT_BSD_USER_INTERRUPT)
-               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+       if (gt_iir & GT_PIPE_NOTIFY)
+               notify_ring(dev, &dev_priv->render_ring);
+       if (gt_iir & bsd_usr_interrupt)
+               notify_ring(dev, &dev_priv->bsd_ring);
+       if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->blt_ring);
  
        if (de_iir & DE_GSE)
-               ironlake_opregion_gse_intr(dev);
+               intel_opregion_gse_intr(dev);
  
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
-               intel_finish_page_flip(dev, 0);
+               intel_finish_page_flip_plane(dev, 0);
        }
  
        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
-               intel_finish_page_flip(dev, 1);
+               intel_finish_page_flip_plane(dev, 1);
        }
  
        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 1);
  
        /* check event from PCH */
-       if ((de_iir & DE_PCH_EVENT) &&
-           (pch_iir & SDE_HOTPLUG_MASK)) {
+       if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-       }
  
        if (de_iir & DE_PCU_EVENT) {
                I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@@ -404,23 -407,20 +407,20 @@@ static void i915_error_work_func(struc
        char *reset_event[] = { "RESET=1", NULL };
        char *reset_done_event[] = { "ERROR=0", NULL };
  
-       DRM_DEBUG_DRIVER("generating error event\n");
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
  
        if (atomic_read(&dev_priv->mm.wedged)) {
-               if (IS_I965G(dev)) {
-                       DRM_DEBUG_DRIVER("resetting chip\n");
-                       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
-                       if (!i965_reset(dev, GDRST_RENDER)) {
-                               atomic_set(&dev_priv->mm.wedged, 0);
-                               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
-                       }
-               } else {
-                       DRM_DEBUG_DRIVER("reboot required\n");
+               DRM_DEBUG_DRIVER("resetting chip\n");
+               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+               if (!i915_reset(dev, GRDOM_RENDER)) {
+                       atomic_set(&dev_priv->mm.wedged, 0);
+                       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
                }
+               complete_all(&dev_priv->error_completion);
        }
  }
  
+ #ifdef CONFIG_DEBUG_FS
  static struct drm_i915_error_object *
  i915_error_object_create(struct drm_device *dev,
                         struct drm_gem_object *src)
  
                local_irq_save(flags);
                s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                           reloc_offset,
 -                                           KM_IRQ0);
 +                                           reloc_offset);
                memcpy_fromio(d, s, PAGE_SIZE);
 -              io_mapping_unmap_atomic(s, KM_IRQ0);
 +              io_mapping_unmap_atomic(s);
                local_irq_restore(flags);
  
                dst->pages[page] = d;
@@@ -510,7 -511,7 +510,7 @@@ i915_get_bbaddr(struct drm_device *dev
  
        if (IS_I830(dev) || IS_845G(dev))
                cmd = MI_BATCH_BUFFER;
-       else if (IS_I965G(dev))
+       else if (INTEL_INFO(dev)->gen >= 4)
                cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
                       MI_BATCH_NON_SECURE_I965);
        else
@@@ -583,13 -584,16 +583,16 @@@ static void i915_capture_error_state(st
                return;
        }
  
-       error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+       DRM_DEBUG_DRIVER("generating error event\n");
+       error->seqno =
+               dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->pipeastat = I915_READ(PIPEASTAT);
        error->pipebstat = I915_READ(PIPEBSTAT);
        error->instpm = I915_READ(INSTPM);
-       if (!IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen < 4) {
                error->ipeir = I915_READ(IPEIR);
                error->ipehr = I915_READ(IPEHR);
                error->instdone = I915_READ(INSTDONE);
        batchbuffer[0] = NULL;
        batchbuffer[1] = NULL;
        count = 0;
-       list_for_each_entry(obj_priv,
-                       &dev_priv->render_ring.active_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                struct drm_gem_object *obj = &obj_priv->base;
  
                if (batchbuffer[0] == NULL &&
        }
        /* Scan the other lists for completeness for those bizarre errors. */
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
  
                        if (batchbuffer[0] == NULL &&
                }
        }
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
  
                        if (batchbuffer[0] == NULL &&
        }
  
        /* We need to copy these to an anonymous buffer as the simplest
-        * method to avoid being overwritten by userpace.
+        * method to avoid being overwritten by userspace.
         */
        error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
        if (batchbuffer[1] != batchbuffer[0])
  
        if (error->active_bo) {
                int i = 0;
-               list_for_each_entry(obj_priv,
-                               &dev_priv->render_ring.active_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
  
                        error->active_bo[i].size = obj->size;
@@@ -743,6 -744,9 +743,9 @@@ void i915_destroy_error_state(struct dr
        if (error)
                i915_error_state_free(dev, error);
  }
+ #else
+ #define i915_capture_error_state(x)
+ #endif
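/*
 * Sketch (illustrative, not from the patch): the compile-out idiom used
 * above. When a facility is configured out, a no-op macro keeps the call
 * sites free of #ifdef clutter. CONFIG_EXAMPLE and example_capture are
 * placeholders, not kernel symbols. A "do { } while (0)" body is the more
 * defensive spelling, since an empty expansion silently swallows its
 * argument expression.
 */
struct drm_device;

#ifdef CONFIG_EXAMPLE
void example_capture(struct drm_device *dev);
#else
#define example_capture(dev) do { } while (0)
#endif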
  
  static void i915_report_and_clear_eir(struct drm_device *dev)
  {
                }
        }
  
-       if (IS_I9XX(dev)) {
+       if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        printk(KERN_ERR "page table error\n");
                printk(KERN_ERR "instruction error\n");
                printk(KERN_ERR "  INSTPM: 0x%08x\n",
                       I915_READ(INSTPM));
-               if (!IS_I965G(dev)) {
+               if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);
  
                        printk(KERN_ERR "  IPEIR: 0x%08x\n",
@@@ -875,12 -879,17 +878,17 @@@ static void i915_handle_error(struct dr
        i915_report_and_clear_eir(dev);
  
        if (wedged) {
+               INIT_COMPLETION(dev_priv->error_completion);
                atomic_set(&dev_priv->mm.wedged, 1);
  
                /*
                 * Wakeup waiting processes so they don't hang
                 */
-               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+               wake_up_all(&dev_priv->render_ring.irq_queue);
+               if (HAS_BSD(dev))
+                       wake_up_all(&dev_priv->bsd_ring.irq_queue);
+               if (HAS_BLT(dev))
+                       wake_up_all(&dev_priv->blt_ring.irq_queue);
        }
  
        queue_work(dev_priv->wq, &dev_priv->error_work);
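/*
 * Sketch (illustrative, not from the patch): the completion handshake wired
 * up above. While the GPU is wedged, waiters sleep on error_completion; the
 * reset worker calls complete_all() once recovery finishes, releasing every
 * waiter at once. A simplified placeholder wrapper, assuming
 * <linux/completion.h> and <linux/atomic.h>.
 */
static int example_wait_for_reset(struct completion *error_completion,
                                  atomic_t *wedged)
{
        if (!atomic_read(wedged))
                return 0;       /* GPU healthy, nothing to wait for */

        /* interruptible, so a stuck reset cannot wedge the caller too */
        return wait_for_completion_interruptible(error_completion);
}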
@@@ -911,7 -920,7 +919,7 @@@ static void i915_pageflip_stall_check(s
  
        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
        obj_priv = to_intel_bo(work->pending_flip_obj);
-       if(IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
                stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
        } else {
@@@ -941,7 -950,6 +949,6 @@@ irqreturn_t i915_driver_irq_handler(DRM
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;
-       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
  
        atomic_inc(&dev_priv->irq_received);
  
  
        iir = I915_READ(IIR);
  
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
        else
                vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
                                        READ_BREADCRUMB(dev_priv);
                }
  
-               if (iir & I915_USER_INTERRUPT) {
-                       u32 seqno =
-                               render_ring->get_gem_seqno(dev, render_ring);
-                       render_ring->irq_gem_seqno = seqno;
-                       trace_i915_gem_request_complete(dev, seqno);
-                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       dev_priv->hangcheck_count = 0;
-                       mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-               }
+               if (iir & I915_USER_INTERRUPT)
+                       notify_ring(dev, &dev_priv->render_ring);
                if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       notify_ring(dev, &dev_priv->bsd_ring);
  
                if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
                        intel_prepare_page_flip(dev, 0);
                if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
-                       opregion_asle_intr(dev);
+                       intel_opregion_asle_intr(dev);
  
                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
@@@ -1206,18 -1206,15 +1205,15 @@@ int i915_enable_vblank(struct drm_devic
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-       u32 pipeconf;
  
-       pipeconf = I915_READ(pipeconf_reg);
-       if (!(pipeconf & PIPEACONF_ENABLE))
+       if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
  
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
-       else if (IS_I965G(dev))
+       else if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
@@@ -1251,7 -1248,7 +1247,7 @@@ void i915_enable_interrupt (struct drm_
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (!HAS_PCH_SPLIT(dev))
-               opregion_enable_asle(dev);
+               intel_opregion_enable_asle(dev);
        dev_priv->irq_enabled = 1;
  }
  
@@@ -1310,7 -1307,7 +1306,7 @@@ int i915_vblank_swap(struct drm_device 
        return -EINVAL;
  }
  
- struct drm_i915_gem_request *
+ static struct drm_i915_gem_request *
  i915_get_tail_request(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -1330,11 -1327,7 +1326,7 @@@ void i915_hangcheck_elapsed(unsigned lo
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd, instdone, instdone1;
  
-       /* No reset support on this chip yet. */
-       if (IS_GEN6(dev))
-               return;
-       if (!IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen < 4) {
                acthd = I915_READ(ACTHD);
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
  
        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (list_empty(&dev_priv->render_ring.request_list) ||
-               i915_seqno_passed(i915_get_gem_seqno(dev,
-                               &dev_priv->render_ring),
-                       i915_get_tail_request(dev)->seqno)) {
+               i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+                                 i915_get_tail_request(dev)->seqno)) {
                bool missed_wakeup = false;
  
                dev_priv->hangcheck_count = 0;
                /* Issue a wake-up to catch stuck h/w. */
                if (dev_priv->render_ring.waiting_gem_seqno &&
                    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       wake_up_all(&dev_priv->render_ring.irq_queue);
                        missed_wakeup = true;
                }
  
                if (dev_priv->bsd_ring.waiting_gem_seqno &&
                    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       wake_up_all(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+               if (dev_priv->blt_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+                       wake_up_all(&dev_priv->blt_ring.irq_queue);
                        missed_wakeup = true;
                }
  
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
                        DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+                       if (!IS_GEN2(dev)) {
+                               /* Is the chip hanging on a WAIT_FOR_EVENT?
+                                * If so we can simply poke the RB_WAIT bit
+                                * and break the hang. This should work on
+                                * all but the second generation chipsets.
+                                */
+                               u32 tmp = I915_READ(PRB0_CTL);
+                               if (tmp & RING_WAIT) {
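+                                       /* writing CTL back with RB_WAIT set
+                                        * clears it and kicks the ring */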
+                                       I915_WRITE(PRB0_CTL, tmp);
+                                       POSTING_READ(PRB0_CTL);
+                                       goto out;
+                               }
+                       }
                        i915_handle_error(dev, true);
                        return;
                }
                dev_priv->last_instdone1 = instdone1;
        }
  
+ out:
        /* Reset timer in case the chip hangs without another request being added */
-       mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+       mod_timer(&dev_priv->hangcheck_timer,
+                 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
  }
  
  /* drm_dma.h hooks
@@@ -1423,8 -1438,7 +1437,7 @@@ static int ironlake_irq_postinstall(str
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
-       u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-                          SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+       u32 hotplug_mask;
  
        dev_priv->irq_mask_reg = ~display_mask;
        dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
        I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
        (void) I915_READ(DEIER);
  
-       /* Gen6 only needs render pipe_control now */
-       if (IS_GEN6(dev))
-               render_mask = GT_PIPE_NOTIFY;
+       if (IS_GEN6(dev)) {
+               render_mask =
+                       GT_PIPE_NOTIFY |
+                       GT_GEN6_BSD_USER_INTERRUPT |
+                       GT_BLT_USER_INTERRUPT;
+       }
  
        dev_priv->gt_irq_mask_reg = ~render_mask;
        dev_priv->gt_irq_enable_reg = render_mask;
  
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev)) {
                I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+               I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+               I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+       }
        I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
        (void) I915_READ(GTIER);
  
+       if (HAS_PCH_CPT(dev)) {
+               hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
+                              SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT;
+       } else {
+               hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+                              SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+       }
        dev_priv->pch_irq_mask_reg = ~hotplug_mask;
        dev_priv->pch_irq_enable_reg = hotplug_mask;
  
@@@ -1505,9 -1534,10 +1533,10 @@@ int i915_driver_irq_postinstall(struct 
        u32 error_mask;
  
        DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
        if (HAS_BSD(dev))
                DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+       if (HAS_BLT(dev))
+               DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
  
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
  
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
  
-       opregion_enable_asle(dev);
+       intel_opregion_enable_asle(dev);
  
        return 0;
  }
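
Each postinstall path above repeats the same unmask sequence: acknowledge stale interrupts by writing IIR back to itself, program IMR with the complement of the wanted sources, enable them in IER, and finish with a posting read so the write is flushed before interrupts can fire. A condensed sketch of that sequence, where WRITE32/READ32 are placeholder mmio accessors, not the driver's:

/* Hedged sketch of the ack/unmask/enable/flush sequence above. */
static void irq_unmask_sources(void __iomem *mmio, u32 iir, u32 imr,
			       u32 ier, u32 sources)
{
	WRITE32(mmio + iir, READ32(mmio + iir));	/* ack anything already pending */
	WRITE32(mmio + imr, ~sources);			/* unmask only the wanted bits */
	WRITE32(mmio + ier, sources);			/* enable delivery */
	(void)READ32(mmio + ier);			/* posting read flushes the write */
}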
@@@ -170,56 -170,143 +170,143 @@@ struct overlay_registers 
      u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
  };
  
- /* overlay flip addr flag */
- #define OFC_UPDATE            0x1
- #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
- #define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
+ struct intel_overlay {
+       struct drm_device *dev;
+       struct intel_crtc *crtc;
+       struct drm_i915_gem_object *vid_bo;
+       struct drm_i915_gem_object *old_vid_bo;
+       int active;
+       int pfit_active;
+       u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+       u32 color_key;
+       u32 brightness, contrast, saturation;
+       u32 old_xscale, old_yscale;
+       /* register access */
+       u32 flip_addr;
+       struct drm_i915_gem_object *reg_bo;
+       /* flip handling */
+       uint32_t last_flip_req;
+       void (*flip_tail)(struct intel_overlay *);
+ };
  
- static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+ static struct overlay_registers *
+ intel_overlay_map_regs(struct intel_overlay *overlay)
  {
          drm_i915_private_t *dev_priv = overlay->dev->dev_private;
        struct overlay_registers *regs;
  
-       /* no recursive mappings */
-       BUG_ON(overlay->virt_addr);
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+                                        overlay->reg_bo->gtt_offset);
  
-       if (OVERLAY_NONPHYSICAL(overlay->dev)) {
-               regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                               overlay->reg_bo->gtt_offset);
+       return regs;
+ }
  
-               if (!regs) {
-                       DRM_ERROR("failed to map overlay regs in GTT\n");
-                       return NULL;
-               }
-       } else
-               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+                                    struct overlay_registers *regs)
+ {
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               io_mapping_unmap(regs);
+ }
+ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+                                        struct drm_i915_gem_request *request,
+                                        bool interruptible,
+                                        void (*tail)(struct intel_overlay *))
+ {
+       struct drm_device *dev = overlay->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+       BUG_ON(overlay->last_flip_req);
+       overlay->last_flip_req =
+               i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+       if (overlay->last_flip_req == 0)
+               return -ENOMEM;
  
-       return overlay->virt_addr = regs;
+       overlay->flip_tail = tail;
+       ret = i915_do_wait_request(dev,
+                                  overlay->last_flip_req, true,
+                                  &dev_priv->render_ring);
+       if (ret)
+               return ret;
+       overlay->last_flip_req = 0;
+       return 0;
  }
  
- static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+ /* Workaround for i830 bug where pipe A must be enabled to change control regs */
+ static int
+ i830_activate_pipe_a(struct drm_device *dev)
  {
-       if (OVERLAY_NONPHYSICAL(overlay->dev))
-               io_mapping_unmap_atomic(overlay->virt_addr);
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       struct drm_display_mode vesa_640x480 = {
+               DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                        752, 800, 0, 480, 489, 492, 525, 0,
+                        DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+       }, *mode;
+       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+       if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+               return 0;
  
-       overlay->virt_addr = NULL;
+       /* most i8xx have pipe A forced on, so don't trust the dpms mode */
+       if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+               return 0;
  
-       return;
+       crtc_funcs = crtc->base.helper_private;
+       if (crtc_funcs->dpms == NULL)
+               return 0;
+       DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+       mode = drm_mode_duplicate(dev, &vesa_640x480);
+       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+                                      crtc->base.x, crtc->base.y,
+                                      crtc->base.fb))
+               return 0;
+       crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+       return 1;
+ }
+ static void
+ i830_deactivate_pipe_a(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  }
  
  /* overlay needs to be disabled in OCMD reg */
  static int intel_overlay_on(struct intel_overlay *overlay)
  {
        struct drm_device *dev = overlay->dev;
+       struct drm_i915_gem_request *request;
+       int pipe_a_quirk = 0;
        int ret;
-       drm_i915_private_t *dev_priv = dev->dev_private;
  
        BUG_ON(overlay->active);
        overlay->active = 1;
-       overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+       if (IS_I830(dev)) {
+               pipe_a_quirk = i830_activate_pipe_a(dev);
+               if (pipe_a_quirk < 0)
+                       return pipe_a_quirk;
+       }
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
  
        BEGIN_LP_RING(4);
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();
  
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-       ret = i915_do_wait_request(dev,
-                       overlay->last_flip_req, 1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
+       ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+ out:
+       if (pipe_a_quirk)
+               i830_deactivate_pipe_a(dev);
  
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return 0;
+       return ret;
  }
  
  /* overlay needs to be enabled in OCMD reg */
- static void intel_overlay_continue(struct intel_overlay *overlay,
-                           bool load_polyphase_filter)
+ static int intel_overlay_continue(struct intel_overlay *overlay,
+                                 bool load_polyphase_filter)
  {
        struct drm_device *dev = overlay->dev;
          drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *request;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
  
        BUG_ON(!overlay->active);
  
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
        if (load_polyphase_filter)
                flip_addr |= OFC_UPDATE;
  
          ADVANCE_LP_RING();
  
        overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+               i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+       return 0;
  }
  
- static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
  {
-       struct drm_device *dev = overlay->dev;
-         drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
-       u32 tmp;
-       if (overlay->last_flip_req != 0) {
-               ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                               1, &dev_priv->render_ring);
-               if (ret == 0) {
-                       overlay->last_flip_req = 0;
-                       tmp = I915_READ(ISR);
+       struct drm_gem_object *obj = &overlay->old_vid_bo->base;
  
-                       if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
-                               return 0;
-               }
-       }
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
  
-       /* synchronous slowpath */
-       overlay->hw_wedged = RELEASE_OLD_VID;
+       overlay->old_vid_bo = NULL;
+ }
  
-       BEGIN_LP_RING(2);
-         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-         OUT_RING(MI_NOOP);
-         ADVANCE_LP_RING();
+ static void intel_overlay_off_tail(struct intel_overlay *overlay)
+ {
+       struct drm_gem_object *obj;
  
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
+       /* never have the overlay hw on without showing a frame */
+       BUG_ON(!overlay->vid_bo);
+       obj = &overlay->vid_bo->base;
  
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       overlay->vid_bo = NULL;
  
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return 0;
+       overlay->crtc->overlay = NULL;
+       overlay->crtc = NULL;
+       overlay->active = 0;
  }
  
  /* overlay needs to be disabled in OCMD reg */
- static int intel_overlay_off(struct intel_overlay *overlay)
+ static int intel_overlay_off(struct intel_overlay *overlay,
+                            bool interruptible)
  {
-       u32 flip_addr = overlay->flip_addr;
        struct drm_device *dev = overlay->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       u32 flip_addr = overlay->flip_addr;
+       struct drm_i915_gem_request *request;
  
        BUG_ON(!overlay->active);
  
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
+       /* According to Intel docs the overlay hw may hang (when switching
+        * off) without loading the filter coeffs. It is, however, unclear whether
+        * this applies to the disabling of the overlay or to the switching off
+        * of the hw. Do it in both cases. */
        flip_addr |= OFC_UPDATE;
  
+       BEGIN_LP_RING(6);
        /* wait for overlay to go idle */
-       overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-       BEGIN_LP_RING(4);
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
        OUT_RING(flip_addr);
-         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-         OUT_RING(MI_NOOP);
-         ADVANCE_LP_RING();
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
+       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
-       overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-       BEGIN_LP_RING(4);
-         OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+       OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
        OUT_RING(flip_addr);
-         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-         OUT_RING(MI_NOOP);
+       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        ADVANCE_LP_RING();
  
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return ret;
- }
- static void intel_overlay_off_tail(struct intel_overlay *overlay)
- {
-       struct drm_gem_object *obj;
-       /* never have the overlay hw on without showing a frame */
-       BUG_ON(!overlay->vid_bo);
-       obj = &overlay->vid_bo->base;
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       overlay->vid_bo = NULL;
-       overlay->crtc->overlay = NULL;
-       overlay->crtc = NULL;
-       overlay->active = 0;
+       return intel_overlay_do_wait_request(overlay, request, interruptible,
+                                            intel_overlay_off_tail);
  }
  
  /* recover from an interruption due to a signal
   * We have to be careful not to repeat work forever and to make forward progress. */
- int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-                                        int interruptible)
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+                                               bool interruptible)
  {
        struct drm_device *dev = overlay->dev;
-       struct drm_gem_object *obj;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 flip_addr;
        int ret;
  
-       if (overlay->hw_wedged == HW_WEDGED)
-               return -EIO;
-       if (overlay->last_flip_req == 0) {
-               overlay->last_flip_req =
-                       i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-               if (overlay->last_flip_req == 0)
-                       return -ENOMEM;
-       }
+       if (overlay->last_flip_req == 0)
+               return 0;
  
        ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       interruptible, &dev_priv->render_ring);
-       if (ret != 0)
+                                  interruptible, &dev_priv->render_ring);
+       if (ret)
                return ret;
  
-       switch (overlay->hw_wedged) {
-               case RELEASE_OLD_VID:
-                       obj = &overlay->old_vid_bo->base;
-                       i915_gem_object_unpin(obj);
-                       drm_gem_object_unreference(obj);
-                       overlay->old_vid_bo = NULL;
-                       break;
-               case SWITCH_OFF_STAGE_1:
-                       flip_addr = overlay->flip_addr;
-                       flip_addr |= OFC_UPDATE;
-                       overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-                       OUT_RING(flip_addr);
-                       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-                       OUT_RING(MI_NOOP);
-                       ADVANCE_LP_RING();
-                       overlay->last_flip_req = i915_add_request(dev, NULL,
-                                       0, &dev_priv->render_ring);
-                       if (overlay->last_flip_req == 0)
-                               return -ENOMEM;
-                       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                                       interruptible, &dev_priv->render_ring);
-                       if (ret != 0)
-                               return ret;
-               case SWITCH_OFF_STAGE_2:
-                       intel_overlay_off_tail(overlay);
-                       break;
-               default:
-                       BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
-       }
+       if (overlay->flip_tail)
+               overlay->flip_tail(overlay);
  
-       overlay->hw_wedged = 0;
        overlay->last_flip_req = 0;
        return 0;
  }
  
  /* Wait for pending overlay flip and release old frame.
   * Needs to be called before the overlay registers are changed
-  * via intel_overlay_(un)map_regs_atomic */
+  * via intel_overlay_(un)map_regs
+  */
  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
  {
+       struct drm_device *dev = overlay->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       struct drm_gem_object *obj;
  
-       /* only wait if there is actually an old frame to release to
-        * guarantee forward progress */
+       /* Only wait if there is actually an old frame to release to
+        * guarantee forward progress.
+        */
        if (!overlay->old_vid_bo)
                return 0;
  
-       ret = intel_overlay_wait_flip(overlay);
-       if (ret != 0)
-               return ret;
+       if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+               struct drm_i915_gem_request *request;
  
-       obj = &overlay->old_vid_bo->base;
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       overlay->old_vid_bo = NULL;
+               /* synchronous slowpath */
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
  
+               BEGIN_LP_RING(2);
+               OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+               OUT_RING(MI_NOOP);
+               ADVANCE_LP_RING();
+               ret = intel_overlay_do_wait_request(overlay, request, true,
+                                                   intel_overlay_release_old_vid_tail);
+               if (ret)
+                       return ret;
+       }
+       intel_overlay_release_old_vid_tail(overlay);
        return 0;
  }
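
The flow above takes the synchronous slowpath only when ISR still reports the overlay flip as pending; in both cases the old frame is released exactly once. A hedged outline with the ring emission elided (helper names hypothetical):

/* Illustrative outline of intel_overlay_release_old_vid(), not kernel code. */
static int release_old_frame(struct overlay *ov)
{
	if (!ov->old_frame)
		return 0;			/* nothing outstanding: trivially done */

	if (flip_still_pending(ov)) {		/* hw has not latched the new frame yet */
		int ret = wait_for_flip(ov);	/* synchronous slowpath: MI_WAIT + request */
		if (ret)
			return ret;		/* interrupted; recovery runs the tail later */
	}

	release_tail(ov);			/* unpin and drop the old frame */
	return 0;
}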
  
@@@ -505,65 -502,65 +502,65 @@@ struct put_image_params 
  static int packed_depth_bytes(u32 format)
  {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       return 4;
-               case I915_OVERLAY_YUV411:
-                       /* return 6; not implemented */
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+               return 4;
+       case I915_OVERLAY_YUV411:
+               /* return 6; not implemented */
+       default:
+               return -EINVAL;
        }
  }
  
  static int packed_width_bytes(u32 format, short width)
  {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       return width << 1;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+               return width << 1;
+       default:
+               return -EINVAL;
        }
  }
  
  static int uv_hsubsampling(u32 format)
  {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-               case I915_OVERLAY_YUV420:
-                       return 2;
-               case I915_OVERLAY_YUV411:
-               case I915_OVERLAY_YUV410:
-                       return 4;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV420:
+               return 2;
+       case I915_OVERLAY_YUV411:
+       case I915_OVERLAY_YUV410:
+               return 4;
+       default:
+               return -EINVAL;
        }
  }
  
  static int uv_vsubsampling(u32 format)
  {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV420:
-               case I915_OVERLAY_YUV410:
-                       return 2;
-               case I915_OVERLAY_YUV422:
-               case I915_OVERLAY_YUV411:
-                       return 1;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV420:
+       case I915_OVERLAY_YUV410:
+               return 2;
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV411:
+               return 1;
+       default:
+               return -EINVAL;
        }
  }
  
  static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
  {
        u32 mask, shift, ret;
-       if (IS_I9XX(dev)) {
-               mask = 0x3f;
-               shift = 6;
-       } else {
+       if (IS_GEN2(dev)) {
                mask = 0x1f;
                shift = 5;
+       } else {
+               mask = 0x3f;
+               shift = 6;
        }
        ret = ((offset + width + mask) >> shift) - (offset >> shift);
-       if (IS_I9XX(dev))
+       if (!IS_GEN2(dev))
                ret <<= 1;
        ret -= 1;
        return ret << 2;
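
A worked example of the computation above, with illustrative values on the non-GEN2 path (mask = 0x3f, shift = 6):

/* offset = 32, width = 720 (values invented for illustration):
 *   ret = ((32 + 720 + 0x3f) >> 6) - (32 >> 6) = 12 - 0	(64-byte units spanned)
 *   ret <<= 1;		-> 24	(non-GEN2 doubles the unit count)
 *   ret -= 1;		-> 23
 *   return 23 << 2;	-> 92	(the value programmed into SWIDTHSW)
 */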
@@@ -586,7 -583,9 +583,9 @@@ static const u16 y_static_hcoeffs[N_HOR
        0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
        0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
        0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
-       0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+       0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+ };
  static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
        0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
        0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
        0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
        0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
        0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
-       0x3000, 0x0800, 0x3000};
+       0x3000, 0x0800, 0x3000
+ };
  
  static void update_polyphase_filter(struct overlay_registers *regs)
  {
@@@ -629,29 -629,31 +629,31 @@@ static bool update_scaling_factors(stru
                yscale = 1 << FP_SHIFT;
  
        /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
-               xscale_UV = xscale/uv_hscale;
-               yscale_UV = yscale/uv_vscale;
-               /* make the Y scale to UV scale ratio an exact multiply */
-               xscale = xscale_UV * uv_hscale;
-               yscale = yscale_UV * uv_vscale;
+       xscale_UV = xscale/uv_hscale;
+       yscale_UV = yscale/uv_vscale;
+       /* make the Y scale to UV scale ratio an exact multiply */
+       xscale = xscale_UV * uv_hscale;
+       yscale = yscale_UV * uv_vscale;
        /*} else {
-               xscale_UV = 0;
-               yscale_UV = 0;
-       }*/
+         xscale_UV = 0;
+         yscale_UV = 0;
+         }*/
  
        if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
                scale_changed = true;
        overlay->old_xscale = xscale;
        overlay->old_yscale = yscale;
  
-       regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
-               | ((xscale >> FP_SHIFT) << 16)
-               | ((xscale & FRACT_MASK) << 3);
-       regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
-               | ((xscale_UV >> FP_SHIFT) << 16)
-               | ((xscale_UV & FRACT_MASK) << 3);
-       regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
-               | ((yscale_UV >> FP_SHIFT) << 0);
+       regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+                          ((xscale >> FP_SHIFT)  << 16) |
+                          ((xscale & FRACT_MASK) << 3));
+       regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+                        ((xscale_UV >> FP_SHIFT)  << 16) |
+                        ((xscale_UV & FRACT_MASK) << 3));
+       regs->UVSCALEV = ((((yscale    >> FP_SHIFT) << 16) |
+                          ((yscale_UV >> FP_SHIFT) << 0)));
  
        if (scale_changed)
                update_polyphase_filter(regs);
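
The register packing above splits each fixed-point scale factor into an integer part above FP_SHIFT and a fractional part under FRACT_MASK. Assuming FP_SHIFT is 12, matching the 12-bit fixed point the pfit_vscale_ratio comment describes, and FRACT_MASK is the low 12 bits, a 1.5x horizontal scale lands in YRGBSCALE like this:

/* Hypothetical packing, assuming FP_SHIFT == 12 and FRACT_MASK == 0xfff:
 *   xscale = 1.5 * (1 << 12) = 0x1800
 *   integer part:    (0x1800 >> 12) << 16 = 0x00010000	(bits 16 and up)
 *   fractional part: (0x1800 & 0xfff) << 3 = 0x00004000	(bits 3..14)
 *   the vertical fraction, (yscale & 0xfff) << 20, fills the top bits
 */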
@@@ -663,22 -665,28 +665,28 @@@ static void update_colorkey(struct inte
                            struct overlay_registers *regs)
  {
        u32 key = overlay->color_key;
        switch (overlay->crtc->base.fb->bits_per_pixel) {
-               case 8:
-                       regs->DCLRKV = 0;
-                       regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
-               case 16:
-                       if (overlay->crtc->base.fb->depth == 15) {
-                               regs->DCLRKV = RGB15_TO_COLORKEY(key);
-                               regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
-                       } else {
-                               regs->DCLRKV = RGB16_TO_COLORKEY(key);
-                               regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
-                       }
-               case 24:
-               case 32:
-                       regs->DCLRKV = key;
-                       regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+       case 8:
+               regs->DCLRKV = 0;
+               regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+               break;
+       case 16:
+               if (overlay->crtc->base.fb->depth == 15) {
+                       regs->DCLRKV = RGB15_TO_COLORKEY(key);
+                       regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+               } else {
+                       regs->DCLRKV = RGB16_TO_COLORKEY(key);
+                       regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+               }
+               break;
+       case 24:
+       case 32:
+               regs->DCLRKV = key;
+               regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+               break;
        }
  }
  
@@@ -688,48 -696,48 +696,48 @@@ static u32 overlay_cmd_reg(struct put_i
  
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
                switch (params->format & I915_OVERLAY_DEPTH_MASK) {
-                       case I915_OVERLAY_YUV422:
-                               cmd |= OCMD_YUV_422_PLANAR;
-                               break;
-                       case I915_OVERLAY_YUV420:
-                               cmd |= OCMD_YUV_420_PLANAR;
-                               break;
-                       case I915_OVERLAY_YUV411:
-                       case I915_OVERLAY_YUV410:
-                               cmd |= OCMD_YUV_410_PLANAR;
-                               break;
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV420:
+                       cmd |= OCMD_YUV_420_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV411:
+               case I915_OVERLAY_YUV410:
+                       cmd |= OCMD_YUV_410_PLANAR;
+                       break;
                }
        } else { /* YUV packed */
                switch (params->format & I915_OVERLAY_DEPTH_MASK) {
-                       case I915_OVERLAY_YUV422:
-                               cmd |= OCMD_YUV_422_PACKED;
-                               break;
-                       case I915_OVERLAY_YUV411:
-                               cmd |= OCMD_YUV_411_PACKED;
-                               break;
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PACKED;
+                       break;
+               case I915_OVERLAY_YUV411:
+                       cmd |= OCMD_YUV_411_PACKED;
+                       break;
                }
  
                switch (params->format & I915_OVERLAY_SWAP_MASK) {
-                       case I915_OVERLAY_NO_SWAP:
-                               break;
-                       case I915_OVERLAY_UV_SWAP:
-                               cmd |= OCMD_UV_SWAP;
-                               break;
-                       case I915_OVERLAY_Y_SWAP:
-                               cmd |= OCMD_Y_SWAP;
-                               break;
-                       case I915_OVERLAY_Y_AND_UV_SWAP:
-                               cmd |= OCMD_Y_AND_UV_SWAP;
-                               break;
+               case I915_OVERLAY_NO_SWAP:
+                       break;
+               case I915_OVERLAY_UV_SWAP:
+                       cmd |= OCMD_UV_SWAP;
+                       break;
+               case I915_OVERLAY_Y_SWAP:
+                       cmd |= OCMD_Y_SWAP;
+                       break;
+               case I915_OVERLAY_Y_AND_UV_SWAP:
+                       cmd |= OCMD_Y_AND_UV_SWAP;
+                       break;
                }
        }
  
        return cmd;
  }
  
- int intel_overlay_do_put_image(struct intel_overlay *overlay,
-                              struct drm_gem_object *new_bo,
-                              struct put_image_params *params)
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+                                     struct drm_gem_object *new_bo,
+                                     struct put_image_params *params)
  {
        int ret, tmp_width;
        struct overlay_registers *regs;
                goto out_unpin;
  
        if (!overlay->active) {
-               regs = intel_overlay_map_regs_atomic(overlay);
+               regs = intel_overlay_map_regs(overlay);
                if (!regs) {
                        ret = -ENOMEM;
                        goto out_unpin;
                }
                regs->OCONFIG = OCONF_CC_OUT_8BIT;
-               if (IS_I965GM(overlay->dev))
+               if (IS_GEN4(overlay->dev))
                        regs->OCONFIG |= OCONF_CSC_MODE_BT709;
                regs->OCONFIG |= overlay->crtc->pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
-               intel_overlay_unmap_regs_atomic(overlay);
+               intel_overlay_unmap_regs(overlay, regs);
  
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
  
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        if (!regs) {
                ret = -ENOMEM;
                goto out_unpin;
  
        regs->SWIDTH = params->src_w;
        regs->SWIDTHSW = calc_swidthsw(overlay->dev,
-                       params->offset_Y, tmp_width);
+                                      params->offset_Y, tmp_width);
        regs->SHEIGHT = params->src_h;
        regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
        regs->OSTRIDE = params->stride_Y;
                u32 tmp_U, tmp_V;
                regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
                tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
-                               params->src_w/uv_hscale);
+                                     params->src_w/uv_hscale);
                tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
-                               params->src_w/uv_hscale);
+                                     params->src_w/uv_hscale);
                regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
                regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
                regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
  
        regs->OCMD = overlay_cmd_reg(params);
  
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
  
-       intel_overlay_continue(overlay, scale_changed);
+       ret = intel_overlay_continue(overlay, scale_changed);
+       if (ret)
+               goto out_unpin;
  
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = to_intel_bo(new_bo);
@@@ -828,20 -838,19 +838,19 @@@ out_unpin
        return ret;
  }
  
- int intel_overlay_switch_off(struct intel_overlay *overlay)
+ int intel_overlay_switch_off(struct intel_overlay *overlay,
+                            bool interruptible)
  {
-       int ret;
        struct overlay_registers *regs;
        struct drm_device *dev = overlay->dev;
+       int ret;
  
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
  
-       if (overlay->hw_wedged) {
-               ret = intel_overlay_recover_from_interrupt(overlay, 1);
-               if (ret != 0)
-                       return ret;
-       }
+       ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+       if (ret != 0)
+               return ret;
  
        if (!overlay->active)
                return 0;
        if (ret != 0)
                return ret;
  
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        regs->OCMD = 0;
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
  
-       ret = intel_overlay_off(overlay);
+       ret = intel_overlay_off(overlay, interruptible);
        if (ret != 0)
                return ret;
  
        intel_overlay_off_tail(overlay);
        return 0;
  }
  
  static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
                                          struct intel_crtc *crtc)
  {
-         drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-       u32 pipeconf;
-       int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+       drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  
-       if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+       if (!crtc->active)
                return -EINVAL;
  
-       pipeconf = I915_READ(pipeconf_reg);
        /* can't use the overlay with double wide pipe */
-       if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+       if (INTEL_INFO(overlay->dev)->gen < 4 &&
+           (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
                return -EINVAL;
  
        return 0;
  static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
  {
        struct drm_device *dev = overlay->dev;
-         drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 ratio;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        u32 pfit_control = I915_READ(PFIT_CONTROL);
+       u32 ratio;
  
        /* XXX: This is not the same logic as in the xorg driver, but more in
-        * line with the intel documentation for the i965 */
-       if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
-               ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
-       } else { /* on i965 use the PGM reg to read out the autoscaler values */
-               ratio = I915_READ(PFIT_PGM_RATIOS);
-               if (IS_I965G(dev))
-                       ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+        * line with the intel documentation for the i965
+        */
+       if (INTEL_INFO(dev)->gen >= 4) {
+               /* on i965 use the PGM reg to read out the autoscaler values */
+               ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+       } else {
+               if (pfit_control & VERT_AUTO_SCALE)
+                       ratio = I915_READ(PFIT_AUTO_RATIOS);
                else
-                       ratio >>= PFIT_VERT_SCALE_SHIFT;
+                       ratio = I915_READ(PFIT_PGM_RATIOS);
+               ratio >>= PFIT_VERT_SCALE_SHIFT;
        }
  
        overlay->pfit_vscale_ratio = ratio;
@@@ -909,12 -916,10 +916,10 @@@ static int check_overlay_dst(struct int
  {
        struct drm_display_mode *mode = &overlay->crtc->base.mode;
  
-       if ((rec->dst_x < mode->crtc_hdisplay)
-           && (rec->dst_x + rec->dst_width
-                   <= mode->crtc_hdisplay)
-           && (rec->dst_y < mode->crtc_vdisplay)
-           && (rec->dst_y + rec->dst_height
-                   <= mode->crtc_vdisplay))
+       if (rec->dst_x < mode->crtc_hdisplay &&
+           rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+           rec->dst_y < mode->crtc_vdisplay &&
+           rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
                return 0;
        else
                return -EINVAL;
@@@ -939,53 -944,57 +944,57 @@@ static int check_overlay_src(struct drm
                             struct drm_intel_overlay_put_image *rec,
                             struct drm_gem_object *new_bo)
  {
-       u32 stride_mask;
-       int depth;
        int uv_hscale = uv_hsubsampling(rec->flags);
        int uv_vscale = uv_vsubsampling(rec->flags);
-       size_t tmp;
+       u32 stride_mask, depth, tmp;
  
        /* check src dimensions */
        if (IS_845G(dev) || IS_I830(dev)) {
-               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
-                   || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+                   rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
                        return -EINVAL;
        } else {
-               if (rec->src_height > IMAGE_MAX_HEIGHT
-                   || rec->src_width > IMAGE_MAX_WIDTH)
+               if (rec->src_height > IMAGE_MAX_HEIGHT ||
+                   rec->src_width  > IMAGE_MAX_WIDTH)
                        return -EINVAL;
        }
        /* better safe than sorry, use 4 as the maximal subsampling ratio */
-       if (rec->src_height < N_VERT_Y_TAPS*4
-           || rec->src_width < N_HORIZ_Y_TAPS*4)
+       if (rec->src_height < N_VERT_Y_TAPS*4 ||
+           rec->src_width  < N_HORIZ_Y_TAPS*4)
                return -EINVAL;
  
        /* check alignment constraints */
        switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-               case I915_OVERLAY_RGB:
-                       /* not implemented */
+       case I915_OVERLAY_RGB:
+               /* not implemented */
+               return -EINVAL;
+       case I915_OVERLAY_YUV_PACKED:
+               if (uv_vscale != 1)
                        return -EINVAL;
-               case I915_OVERLAY_YUV_PACKED:
-                       depth = packed_depth_bytes(rec->flags);
-                       if (uv_vscale != 1)
-                               return -EINVAL;
-                       if (depth < 0)
-                               return depth;
-                       /* ignore UV planes */
-                       rec->stride_UV = 0;
-                       rec->offset_U = 0;
-                       rec->offset_V = 0;
-                       /* check pixel alignment */
-                       if (rec->offset_Y % depth)
-                               return -EINVAL;
-                       break;
-               case I915_OVERLAY_YUV_PLANAR:
-                       if (uv_vscale < 0 || uv_hscale < 0)
-                               return -EINVAL;
-                       /* no offset restrictions for planar formats */
-                       break;
-               default:
+               depth = packed_depth_bytes(rec->flags);
+               if (depth < 0)
+                       return depth;
+               /* ignore UV planes */
+               rec->stride_UV = 0;
+               rec->offset_U = 0;
+               rec->offset_V = 0;
+               /* check pixel alignment */
+               if (rec->offset_Y % depth)
+                       return -EINVAL;
+               break;
+       case I915_OVERLAY_YUV_PLANAR:
+               if (uv_vscale < 0 || uv_hscale < 0)
                        return -EINVAL;
+               /* no offset restrictions for planar formats */
+               break;
+       default:
+               return -EINVAL;
        }
  
        if (rec->src_width % uv_hscale)
  
        if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
                return -EINVAL;
-       if (IS_I965G(dev) && rec->stride_Y < 512)
+       if (IS_GEN4(dev) && rec->stride_Y < 512)
                return -EINVAL;
  
        tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
-               4 : 8;
-       if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+               4096 : 8192;
+       if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
                return -EINVAL;
  
        /* check buffer dimensions */
        switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-               case I915_OVERLAY_RGB:
-               case I915_OVERLAY_YUV_PACKED:
-                       /* always 4 Y values per depth pixels */
-                       if (packed_width_bytes(rec->flags, rec->src_width)
-                                       > rec->stride_Y)
-                               return -EINVAL;
-                       tmp = rec->stride_Y*rec->src_height;
-                       if (rec->offset_Y + tmp > new_bo->size)
-                               return -EINVAL;
-                       break;
-               case I915_OVERLAY_YUV_PLANAR:
-                       if (rec->src_width > rec->stride_Y)
-                               return -EINVAL;
-                       if (rec->src_width/uv_hscale > rec->stride_UV)
-                               return -EINVAL;
-                       tmp = rec->stride_Y*rec->src_height;
-                       if (rec->offset_Y + tmp > new_bo->size)
-                               return -EINVAL;
-                       tmp = rec->stride_UV*rec->src_height;
-                       tmp /= uv_vscale;
-                       if (rec->offset_U + tmp > new_bo->size
-                           || rec->offset_V + tmp > new_bo->size)
-                               return -EINVAL;
-                       break;
+       case I915_OVERLAY_RGB:
+       case I915_OVERLAY_YUV_PACKED:
+               /* always 4 Y values per depth pixel */
+               if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+                       return -EINVAL;
+               tmp = rec->stride_Y*rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->size)
+                       return -EINVAL;
+               break;
+       case I915_OVERLAY_YUV_PLANAR:
+               if (rec->src_width > rec->stride_Y)
+                       return -EINVAL;
+               if (rec->src_width/uv_hscale > rec->stride_UV)
+                       return -EINVAL;
+               tmp = rec->stride_Y * rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->size)
+                       return -EINVAL;
+               tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+               if (rec->offset_U + tmp > new_bo->size ||
+                   rec->offset_V + tmp > new_bo->size)
+                       return -EINVAL;
+               break;
        }
  
        return 0;
  }
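
As a concrete instance of the planar bounds checks above (numbers invented, YUV420 so uv_hscale = uv_vscale = 2):

/* src_width = 640, src_height = 480, stride_Y = 640, stride_UV = 320:
 *   Y plane:   offset_Y + 640 * 480        must fit within new_bo->size
 *   UV planes: offset_U + 320 * (480 / 2)  and the same for offset_V
 */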
  
+ /**
+  * Return the pipe currently connected to the panel fitter,
+  * or -1 if the panel fitter is not present or not in use
+  */
+ static int intel_panel_fitter_pipe(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32  pfit_control;
+       /* i830 doesn't have a panel fitter */
+       if (IS_I830(dev))
+               return -1;
+       pfit_control = I915_READ(PFIT_CONTROL);
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       /* 965 can place panel fitter on either pipe */
+       if (IS_GEN4(dev))
+               return (pfit_control >> 29) & 0x3;
+       /* older chips can only use pipe 1 */
+       return 1;
+ }
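
On Gen4 the function above reads the fitter's pipe binding straight out of PFIT_CONTROL; a quick decode with an invented register value:

/* pfit_control = 0xa0000000 (invented):
 *   bit 31 (PFIT_ENABLE) set          -> panel fitter is in use
 *   (0xa0000000 >> 29) & 0x3 = 0x1    -> fitter is attached to pipe B
 */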
  int intel_overlay_put_image(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
  {
                mutex_lock(&dev->mode_config.mutex);
                mutex_lock(&dev->struct_mutex);
  
-               ret = intel_overlay_switch_off(overlay);
+               ret = intel_overlay_switch_off(overlay, true);
  
                mutex_unlock(&dev->struct_mutex);
                mutex_unlock(&dev->mode_config.mutex);
                return -ENOMEM;
  
        drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
-                         DRM_MODE_OBJECT_CRTC);
+                                          DRM_MODE_OBJECT_CRTC);
        if (!drmmode_obj) {
                ret = -ENOENT;
                goto out_free;
        crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
  
        new_bo = drm_gem_object_lookup(dev, file_priv,
-                       put_image_rec->bo_handle);
+                                      put_image_rec->bo_handle);
        if (!new_bo) {
                ret = -ENOENT;
                goto out_free;
        mutex_lock(&dev->mode_config.mutex);
        mutex_lock(&dev->struct_mutex);
  
-       if (overlay->hw_wedged) {
-               ret = intel_overlay_recover_from_interrupt(overlay, 1);
-               if (ret != 0)
-                       goto out_unlock;
-       }
+       ret = intel_overlay_recover_from_interrupt(overlay, true);
+       if (ret != 0)
+               goto out_unlock;
  
        if (overlay->crtc != crtc) {
                struct drm_display_mode *mode = &crtc->base.mode;
-               ret = intel_overlay_switch_off(overlay);
+               ret = intel_overlay_switch_off(overlay, true);
                if (ret != 0)
                        goto out_unlock;
  
                overlay->crtc = crtc;
                crtc->overlay = overlay;
  
-               if (intel_panel_fitter_pipe(dev) == crtc->pipe
-                   /* and line to wide, i.e. one-line-mode */
-                   && mode->hdisplay > 1024) {
+               /* line too wide, i.e. one-line-mode */
+               if (mode->hdisplay > 1024 &&
+                   intel_panel_fitter_pipe(dev) == crtc->pipe) {
                        overlay->pfit_active = 1;
                        update_pfit_vscale_ratio(overlay);
                } else
  
        if (overlay->pfit_active) {
                params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
-                       overlay->pfit_vscale_ratio);
+                                overlay->pfit_vscale_ratio);
                /* shifting right rounds downwards, so add 1 */
                params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
-                       overlay->pfit_vscale_ratio) + 1;
+                                overlay->pfit_vscale_ratio) + 1;
        } else {
                params->dst_y = put_image_rec->dst_y;
                params->dst_h = put_image_rec->dst_height;
        params->src_h = put_image_rec->src_height;
        params->src_scan_w = put_image_rec->src_scan_width;
        params->src_scan_h = put_image_rec->src_scan_height;
-       if (params->src_scan_h > params->src_h
-           || params->src_scan_w > params->src_w) {
+       if (params->src_scan_h > params->src_h ||
+           params->src_scan_w > params->src_w) {
                ret = -EINVAL;
                goto out_unlock;
        }
@@@ -1203,7 -1237,7 +1237,7 @@@ static bool check_gamma_bounds(u32 gamm
                return false;
  
        for (i = 0; i < 3; i++) {
-               if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+               if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
                        return false;
        }
  
@@@ -1224,16 -1258,18 +1258,18 @@@ static bool check_gamma5_errata(u32 gam
  
  static int check_gamma(struct drm_intel_overlay_attrs *attrs)
  {
-       if (!check_gamma_bounds(0, attrs->gamma0)
-           || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
-           || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
-           || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
-           || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
-           || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
-           || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+       if (!check_gamma_bounds(0, attrs->gamma0) ||
+           !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+           !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+           !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+           !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+           !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+           !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
                return -EINVAL;
        if (!check_gamma5_errata(attrs->gamma5))
                return -EINVAL;
        return 0;
  }
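
For illustration, the bounds checks above require every byte lane to strictly increase from one gamma point to the next (values below are invented):

/* gamma0 = 0x00101010, gamma1 = 0x00202020, ..., gamma5 = 0x00606060: OK.
 * A pair such as gamma2 = 0x00303030, gamma3 = 0x00302f30 is rejected,
 * since the middle lane fails to increase (0x30 -> 0x2f), so
 * check_gamma_bounds() returns false and check_gamma() yields -EINVAL.
 */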
  
@@@ -1260,13 -1296,14 +1296,14 @@@ int intel_overlay_attrs(struct drm_devi
        mutex_lock(&dev->mode_config.mutex);
        mutex_lock(&dev->struct_mutex);
  
+       ret = -EINVAL;
        if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
-               attrs->color_key = overlay->color_key;
+               attrs->color_key  = overlay->color_key;
                attrs->brightness = overlay->brightness;
-               attrs->contrast = overlay->contrast;
+               attrs->contrast   = overlay->contrast;
                attrs->saturation = overlay->saturation;
  
-               if (IS_I9XX(dev)) {
+               if (!IS_GEN2(dev)) {
                        attrs->gamma0 = I915_READ(OGAMC0);
                        attrs->gamma1 = I915_READ(OGAMC1);
                        attrs->gamma2 = I915_READ(OGAMC2);
                        attrs->gamma4 = I915_READ(OGAMC4);
                        attrs->gamma5 = I915_READ(OGAMC5);
                }
-               ret = 0;
        } else {
-               overlay->color_key = attrs->color_key;
-               if (attrs->brightness >= -128 && attrs->brightness <= 127) {
-                       overlay->brightness = attrs->brightness;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->brightness < -128 || attrs->brightness > 127)
                        goto out_unlock;
-               }
-               if (attrs->contrast <= 255) {
-                       overlay->contrast = attrs->contrast;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->contrast > 255)
                        goto out_unlock;
-               }
-               if (attrs->saturation <= 1023) {
-                       overlay->saturation = attrs->saturation;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->saturation > 1023)
                        goto out_unlock;
-               }
  
-               regs = intel_overlay_map_regs_atomic(overlay);
+               overlay->color_key  = attrs->color_key;
+               overlay->brightness = attrs->brightness;
+               overlay->contrast   = attrs->contrast;
+               overlay->saturation = attrs->saturation;
+               regs = intel_overlay_map_regs(overlay);
                if (!regs) {
                        ret = -ENOMEM;
                        goto out_unlock;
  
                update_reg_attrs(overlay, regs);
  
-               intel_overlay_unmap_regs_atomic(overlay);
+               intel_overlay_unmap_regs(overlay, regs);
  
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (!IS_I9XX(dev)) {
-                               ret = -EINVAL;
+                       if (IS_GEN2(dev))
                                goto out_unlock;
-                       }
  
                        if (overlay->active) {
                                ret = -EBUSY;
                        }
  
                        ret = check_gamma(attrs);
-                       if (ret != 0)
+                       if (ret)
                                goto out_unlock;
  
                        I915_WRITE(OGAMC0, attrs->gamma0);
                        I915_WRITE(OGAMC4, attrs->gamma4);
                        I915_WRITE(OGAMC5, attrs->gamma5);
                }
-               ret = 0;
        }
  
+       ret = 0;
  out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->mode_config.mutex);
@@@ -1346,7 -1372,7 +1372,7 @@@ void intel_setup_overlay(struct drm_dev
        struct overlay_registers *regs;
        int ret;
  
-       if (!OVERLAY_EXISTS(dev))
+       if (!HAS_OVERLAY(dev))
                return;
  
        overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
                goto out_free;
        overlay->reg_bo = to_intel_bo(reg_bo);
  
-       if (OVERLAY_NONPHYSICAL(dev)) {
-               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
-               if (ret) {
-                         DRM_ERROR("failed to pin overlay register bo\n");
-                         goto out_free_bo;
-                 }
-               overlay->flip_addr = overlay->reg_bo->gtt_offset;
-       } else {
+       if (OVERLAY_NEEDS_PHYSICAL(dev)) {
                ret = i915_gem_attach_phys_object(dev, reg_bo,
                                                  I915_GEM_PHYS_OVERLAY_REGS,
-                                                 0);
+                                                 PAGE_SIZE);
                  if (ret) {
                          DRM_ERROR("failed to attach phys overlay regs\n");
                          goto out_free_bo;
                  }
                overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+       } else {
+               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+               if (ret) {
+                         DRM_ERROR("failed to pin overlay register bo\n");
+                         goto out_free_bo;
+                 }
+               overlay->flip_addr = overlay->reg_bo->gtt_offset;
+               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+               if (ret) {
+                         DRM_ERROR("failed to move overlay register bo into the GTT\n");
+                         goto out_unpin_bo;
+                 }
        }
  
        /* init all values */
        overlay->contrast = 75;
        overlay->saturation = 146;
  
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        if (!regs)
                goto out_free_bo;
  
        memset(regs, 0, sizeof(struct overlay_registers));
        update_polyphase_filter(regs);
        update_reg_attrs(overlay, regs);
  
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
  
        dev_priv->overlay = overlay;
        DRM_INFO("initialized overlay support\n");
        return;
  
+ out_unpin_bo:
+       i915_gem_object_unpin(reg_bo);
  out_free_bo:
        drm_gem_object_unreference(reg_bo);
  out_free:
  
  void intel_cleanup_overlay(struct drm_device *dev)
  {
-         drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
  
-       if (dev_priv->overlay) {
-               /* The bo's should be free'd by the generic code already.
-                * Furthermore modesetting teardown happens beforehand so the
-                * hardware should be off already */
-               BUG_ON(dev_priv->overlay->active);
+       if (!dev_priv->overlay)
+               return;
  
-               kfree(dev_priv->overlay);
-       }
+       /* The BOs should be freed by the generic code already.
+        * Furthermore, modesetting teardown happens beforehand, so the
+        * hardware should be off already */
+       BUG_ON(dev_priv->overlay->active);
+       drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+       kfree(dev_priv->overlay);
  }
  
+ #ifdef CONFIG_DEBUG_FS
+ #include <linux/seq_file.h>
  struct intel_overlay_error_state {
        struct overlay_registers regs;
        unsigned long base;
        u32 isr;
  };
  
 -intel_overlay_map_regs_atomic(struct intel_overlay *overlay,
 -                            int slot)
+ static struct overlay_registers *
 -        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+ {
 -                                              overlay->reg_bo->gtt_offset,
 -                                              slot);
++      drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+       struct overlay_registers *regs;
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                          int slot,
++                                              overlay->reg_bo->gtt_offset);
+       return regs;
+ }
+ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 -              io_mapping_unmap_atomic(regs, slot);
+                                           struct overlay_registers *regs)
+ {
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++              io_mapping_unmap_atomic(regs);
+ }
  struct intel_overlay_error_state *
  intel_overlay_capture_error_state(struct drm_device *dev)
  {
  
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
-       if (OVERLAY_NONPHYSICAL(overlay->dev))
-               error->base = (long) overlay->reg_bo->gtt_offset;
-       else
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               error->base = (long) overlay->reg_bo->gtt_offset;
  
 -      regs = intel_overlay_map_regs_atomic(overlay, KM_IRQ0);
 +      regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
                goto err;
  
        memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay);
 -      intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0, regs);
++      intel_overlay_unmap_regs_atomic(overlay, regs);
  
        return error;
  
@@@ -1514,3 -1578,4 +1575,4 @@@ intel_overlay_print_error_state(struct 
        P(UVSCALEV);
  #undef P
  }
+ #endif
@@@ -60,8 -60,6 +60,6 @@@ static struct drm_driver driver = 
        .irq_uninstall = mga_driver_irq_uninstall,
        .irq_handler = mga_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = mga_ioctls,
        .dma_ioctl = mga_dma_buffers,
        .fops = {
@@@ -75,7 -73,6 +73,7 @@@
  #ifdef CONFIG_COMPAT
                .compat_ioctl = mga_compat_ioctl,
  #endif
 +              .llseek = noop_llseek,
        },
        .pci_driver = {
                .name = DRIVER_NAME,
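
Note: this merge adds .llseek = noop_llseek to the fops of nearly every DRM driver below. With the BKL gone, a driver has to state its seek semantics explicitly, and noop_llseek reports success without moving the file position. A minimal sketch of the idiom for a hypothetical character driver (example_fops and example_open are made-up names, not part of this merge):

#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *file)
{
	return 0;	/* nothing to set up in this sketch */
}

static const struct file_operations example_fops = {
	.owner  = THIS_MODULE,
	.open   = example_open,
	.llseek = noop_llseek,	/* seeks "succeed" but leave f_pos untouched */
};
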
@@@ -43,9 -43,6 +43,6 @@@
  #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
  #define LOG_OLD_VALUE(x)
  
- #define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
- #define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
  struct init_exec {
        bool execute;
        bool repeat;
@@@ -272,12 -269,6 +269,6 @@@ struct init_tbl_entry 
        int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
  };
  
- struct bit_entry {
-       uint8_t id[2];
-       uint16_t length;
-       uint16_t offset;
- };
  static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
  
  #define MACRO_INDEX_SIZE      2
@@@ -1231,7 -1222,7 +1222,7 @@@ init_dp_condition(struct nvbios *bios, 
                        return 3;
                }
  
-               if (cond & 1)
+               if (!(cond & 1))
                        iexec->execute = false;
        }
                break;
@@@ -2167,11 -2158,11 +2158,11 @@@ peek_fb(struct drm_device *dev, struct 
  
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
 -                      io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 +                      io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
  
                val = ioread32(p + (off & ~PAGE_MASK));
  
 -              io_mapping_unmap_atomic(p, KM_USER0);
 +              io_mapping_unmap_atomic(p);
        }
  
        return val;
@@@ -2183,12 -2174,12 +2174,12 @@@ poke_fb(struct drm_device *dev, struct 
  {
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
 -                      io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 +                      io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
  
                iowrite32(val, p + (off & ~PAGE_MASK));
                wmb();
  
 -              io_mapping_unmap_atomic(p, KM_USER0);
 +              io_mapping_unmap_atomic(p);
        }
  }
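
Note: the peek_fb()/poke_fb() hunks above show the shape of the reworked atomic mapping API: io_mapping_map_atomic_wc() and io_mapping_unmap_atomic() no longer take a KM_* slot argument, since slots are now managed stack-style internally. A minimal read helper in the same style (a sketch; assumes a struct io_mapping *fb covering BAR1, as in the surrounding code):

#include <linux/io-mapping.h>

static u32 read_fb_dword(struct io_mapping *fb, u32 off)
{
	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
	u32 val = ioread32(p + (off & ~PAGE_MASK));

	io_mapping_unmap_atomic(p);	/* no KM_USER0/KM_IRQ0 argument anymore */
	return val;
}

The same conversion recurs in the i915 overlay and ttm_bo_util hunks in this merge, where kmap_atomic_prot()/kunmap_atomic() likewise drop their slot argument.
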
  
@@@ -4675,6 -4666,92 +4666,92 @@@ int run_tmds_table(struct drm_device *d
        return 0;
  }
  
+ struct pll_mapping {
+       u8  type;
+       u32 reg;
+ };
+ static struct pll_mapping nv04_pll_mapping[] = {
+       { PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
+       { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+       { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+       { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+       {}
+ };
+ static struct pll_mapping nv40_pll_mapping[] = {
+       { PLL_CORE  , 0x004000 },
+       { PLL_MEMORY, 0x004020 },
+       { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+       { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+       {}
+ };
+ static struct pll_mapping nv50_pll_mapping[] = {
+       { PLL_CORE  , 0x004028 },
+       { PLL_SHADER, 0x004020 },
+       { PLL_UNK03 , 0x004000 },
+       { PLL_MEMORY, 0x004008 },
+       { PLL_UNK40 , 0x00e810 },
+       { PLL_UNK41 , 0x00e818 },
+       { PLL_UNK42 , 0x00e824 },
+       { PLL_VPLL0 , 0x614100 },
+       { PLL_VPLL1 , 0x614900 },
+       {}
+ };
+ static struct pll_mapping nv84_pll_mapping[] = {
+       { PLL_CORE  , 0x004028 },
+       { PLL_SHADER, 0x004020 },
+       { PLL_MEMORY, 0x004008 },
+       { PLL_UNK05 , 0x004030 },
+       { PLL_UNK41 , 0x00e818 },
+       { PLL_VPLL0 , 0x614100 },
+       { PLL_VPLL1 , 0x614900 },
+       {}
+ };
+ u32
+ get_pll_register(struct drm_device *dev, enum pll_types type)
+ {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct pll_mapping *map;
+       int i;
+       if (dev_priv->card_type < NV_40)
+               map = nv04_pll_mapping;
+       else
+       if (dev_priv->card_type < NV_50)
+               map = nv40_pll_mapping;
+       else {
+               u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+               if (plim[0] >= 0x30) {
+                       u8 *entry = plim + plim[1];
+                       for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+                               if (entry[0] == type)
+                                       return ROM32(entry[3]);
+                       }
+                       return 0;
+               }
+               if (dev_priv->chipset == 0x50)
+                       map = nv50_pll_mapping;
+               else
+                       map = nv84_pll_mapping;
+       }
+       while (map->reg) {
+               if (map->type == type)
+                       return map->reg;
+               map++;
+       }
+       return 0;
+ }
  int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
  {
        /*
        /* initialize all members to zero */
        memset(pll_lim, 0, sizeof(struct pll_lims));
  
+       /* if we were passed a type rather than a register, figure
+        * out the register and store it
+        */
+       if (limit_match > PLL_MAX)
+               pll_lim->reg = limit_match;
+       else {
+               pll_lim->reg = get_pll_register(dev, limit_match);
+               if (!pll_lim->reg)
+                       return -ENOENT;
+       }
        if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
                uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
  
                pll_lim->max_usable_log2p = 0x6;
        } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
                uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-               uint32_t reg = 0; /* default match */
                uint8_t *pll_rec;
                int i;
  
                        NV_WARN(dev, "Default PLL limit entry has non-zero "
                                       "register field\n");
  
-               if (limit_match > MAX_PLL_TYPES)
-                       /* we've been passed a reg as the match */
-                       reg = limit_match;
-               else /* limit match is a pll type */
-                       for (i = 1; i < entries && !reg; i++) {
-                               uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-                               if (limit_match == NVPLL &&
-                                   (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
-                                       reg = cmpreg;
-                               if (limit_match == MPLL &&
-                                   (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
-                                       reg = cmpreg;
-                               if (limit_match == VPLL1 &&
-                                   (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
-                                       reg = cmpreg;
-                               if (limit_match == VPLL2 &&
-                                   (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
-                                       reg = cmpreg;
-                       }
                for (i = 1; i < entries; i++)
-                       if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+                       if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
                                pllindex = i;
                                break;
                        }
  
+               if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+                       NV_ERROR(dev, "Register 0x%08x not found in PLL "
+                                "limits table", pll_lim->reg);
+                       return -ENOENT;
+               }
                pll_rec = &bios->data[plloffs + recordlen * pllindex];
  
                BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-                       pllindex ? reg : 0);
+                       pllindex ? pll_lim->reg : 0);
  
                /*
                 * Frequencies are stored in tables in MHz, kHz are more
                if (cv == 0x51 && !pll_lim->refclk) {
                        uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
  
-                       if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
-                           ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+                       if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+                           (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
                                if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
                                        pll_lim->refclk = 200000;
                                else
                int i;
  
                BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-                       limit_match);
+                       pll_lim->reg);
  
                for (i = 0; i < entries; i++, entry += recordlen) {
-                       if (ROM32(entry[3]) == limit_match) {
+                       if (ROM32(entry[3]) == pll_lim->reg) {
                                record = &bios->data[ROM16(entry[1])];
                                break;
                        }
  
                if (!record) {
                        NV_ERROR(dev, "Register 0x%08x not found in PLL "
-                                "limits table", limit_match);
+                                "limits table", pll_lim->reg);
                        return -ENOENT;
                }
  
                int i;
  
                BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-                       limit_match);
+                       pll_lim->reg);
  
                for (i = 0; i < entries; i++, entry += recordlen) {
-                       if (ROM32(entry[3]) == limit_match) {
+                       if (ROM32(entry[3]) == pll_lim->reg) {
                                record = &bios->data[ROM16(entry[1])];
                                break;
                        }
  
                if (!record) {
                        NV_ERROR(dev, "Register 0x%08x not found in PLL "
-                                "limits table", limit_match);
+                                "limits table", pll_lim->reg);
                        return -ENOENT;
                }
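
Note: get_pll_limits() now resolves a PLL type to a register up front via get_pll_register(), whose pre-NV50 path walks a sentinel-terminated array. A userspace-runnable sketch of that walk (the type codes and register values here are illustrative stand-ins, not the real enum pll_types):

#include <stdint.h>
#include <stdio.h>

struct pll_map { uint8_t type; uint32_t reg; };

static uint32_t lookup_pll_reg(const struct pll_map *map, uint8_t type)
{
	for (; map->reg; map++)		/* a zero reg terminates the table */
		if (map->type == type)
			return map->reg;
	return 0;			/* callers treat 0 as "not found" */
}

int main(void)
{
	static const struct pll_map demo[] = {
		{ 1, 0x004000 },	/* stand-in for PLL_CORE */
		{ 2, 0x004020 },	/* stand-in for PLL_MEMORY */
		{ 0, 0 }		/* sentinel, like the kernel's {} terminator */
	};

	printf("0x%06x\n", lookup_pll_reg(demo, 2));	/* prints 0x004020 */
	return 0;
}
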
  
@@@ -5293,7 -5365,7 +5365,7 @@@ parse_bit_M_tbl_entry(struct drm_devic
        if (bitentry->length < 0x5)
                return 0;
  
-       if (bitentry->id[1] < 2) {
+       if (bitentry->version < 2) {
                bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
                bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
        } else {
@@@ -5403,27 -5475,40 +5475,40 @@@ struct bit_table 
  
  #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
  
+ int
+ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+ {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       u8 entries, *entry;
+       entries = bios->data[bios->offset + 10];
+       entry   = &bios->data[bios->offset + 12];
+       while (entries--) {
+               if (entry[0] == id) {
+                       bit->id = entry[0];
+                       bit->version = entry[1];
+                       bit->length = ROM16(entry[2]);
+                       bit->offset = ROM16(entry[4]);
+                       bit->data = ROMPTR(bios, entry[4]);
+                       return 0;
+               }
+               entry += bios->data[bios->offset + 9];
+       }
+       return -ENOENT;
+ }
  static int
  parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
                struct bit_table *table)
  {
        struct drm_device *dev = bios->dev;
-       uint8_t maxentries = bios->data[bitoffset + 4];
-       int i, offset;
        struct bit_entry bitentry;
  
-       for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
-               bitentry.id[0] = bios->data[offset];
-               if (bitentry.id[0] != table->id)
-                       continue;
-               bitentry.id[1] = bios->data[offset + 1];
-               bitentry.length = ROM16(bios->data[offset + 2]);
-               bitentry.offset = ROM16(bios->data[offset + 4]);
+       if (bit_table(dev, table->id, &bitentry) == 0)
                return table->parse_fn(dev, bios, &bitentry);
-       }
  
        NV_INFO(dev, "BIT table '%c' not found\n", table->id);
        return -ENOSYS;
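
Note: bit_table() gives nouveau one helper for locating BIT table entries: the header stores the entry stride at byte 9 and the entry count at byte 10, entries start at byte 12, and each entry carries an id byte, a version byte, and little-endian length/offset words. A userspace-runnable sketch of the same walk over a raw VBIOS image (field positions taken from the hunk above; buffer bounds checking omitted for brevity):

#include <stdint.h>

struct bit_entry_sketch {
	uint8_t  id;
	uint8_t  version;
	uint16_t length;
	uint16_t offset;
};

static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);	/* like the kernel's ROM16() */
}

static int find_bit_entry(const uint8_t *bios, uint16_t base, uint8_t id,
			  struct bit_entry_sketch *bit)
{
	uint8_t stride = bios[base + 9];
	uint8_t entries = bios[base + 10];
	const uint8_t *entry = &bios[base + 12];

	while (entries--) {
		if (entry[0] == id) {
			bit->id = entry[0];
			bit->version = entry[1];
			bit->length = rom16(&entry[2]);
			bit->offset = rom16(&entry[4]);
			return 0;
		}
		entry += stride;
	}
	return -1;	/* the kernel version returns -ENOENT */
}
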
@@@ -5683,8 -5768,14 +5768,14 @@@ static uint16_t findstr(uint8_t *data, 
  static struct dcb_gpio_entry *
  new_gpio_entry(struct nvbios *bios)
  {
+       struct drm_device *dev = bios->dev;
        struct dcb_gpio_table *gpio = &bios->dcb.gpio;
  
+       if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+               NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+               return NULL;
+       }
        return &gpio->entry[gpio->entries++];
  }
  
@@@ -5705,114 -5796,91 +5796,91 @@@ nouveau_bios_gpio_entry(struct drm_devi
        return NULL;
  }
  
- static void
- parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
- {
-       struct dcb_gpio_entry *gpio;
-       uint16_t ent = ROM16(bios->data[offset]);
-       uint8_t line = ent & 0x1f,
-               tag = ent >> 5 & 0x3f,
-               flags = ent >> 11 & 0x1f;
-       if (tag == 0x3f)
-               return;
-       gpio = new_gpio_entry(bios);
-       gpio->tag = tag;
-       gpio->line = line;
-       gpio->invert = flags != 4;
-       gpio->entry = ent;
- }
- static void
- parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
- {
-       uint32_t entry = ROM32(bios->data[offset]);
-       struct dcb_gpio_entry *gpio;
-       if ((entry & 0x0000ff00) == 0x0000ff00)
-               return;
-       gpio = new_gpio_entry(bios);
-       gpio->tag = (entry & 0x0000ff00) >> 8;
-       gpio->line = (entry & 0x0000001f) >> 0;
-       gpio->state_default = (entry & 0x01000000) >> 24;
-       gpio->state[0] = (entry & 0x18000000) >> 27;
-       gpio->state[1] = (entry & 0x60000000) >> 29;
-       gpio->entry = entry;
- }
  static void
  parse_dcb_gpio_table(struct nvbios *bios)
  {
        struct drm_device *dev = bios->dev;
-       uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
-       uint8_t *gpio_table = &bios->data[gpio_table_ptr];
-       int header_len = gpio_table[1],
-           entries = gpio_table[2],
-           entry_len = gpio_table[3];
-       void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+       struct dcb_gpio_entry *e;
+       u8 headerlen, entries, recordlen;
+       u8 *dcb, *gpio = NULL, *entry;
        int i;
  
-       if (bios->dcb.version >= 0x40) {
-               if (gpio_table_ptr && entry_len != 4) {
-                       NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-                       return;
-               }
+       dcb = ROMPTR(bios, bios->data[0x36]);
+       if (dcb[0] >= 0x30) {
+               gpio = ROMPTR(bios, dcb[10]);
+               if (!gpio)
+                       goto no_table;
  
-               parse_entry = parse_dcb40_gpio_entry;
+               headerlen = gpio[1];
+               entries   = gpio[2];
+               recordlen = gpio[3];
+       } else
+       if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+               gpio = ROMPTR(bios, dcb[-15]);
+               if (!gpio)
+                       goto no_table;
+               headerlen = 3;
+               entries   = gpio[2];
+               recordlen = gpio[1];
+       } else
+       if (dcb[0] >= 0x22) {
+               /* No GPIO table present, parse the TVDAC GPIO data. */
+               uint8_t *tvdac_gpio = &dcb[-5];
  
-       } else if (bios->dcb.version >= 0x30) {
-               if (gpio_table_ptr && entry_len != 2) {
-                       NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-                       return;
+               if (tvdac_gpio[0] & 1) {
+                       e = new_gpio_entry(bios);
+                       e->tag = DCB_GPIO_TVDAC0;
+                       e->line = tvdac_gpio[1] >> 4;
+                       e->invert = tvdac_gpio[0] & 2;
                }
  
-               parse_entry = parse_dcb30_gpio_entry;
-       } else if (bios->dcb.version >= 0x22) {
-               /*
-                * DCBs older than v3.0 don't really have a GPIO
-                * table, instead they keep some GPIO info at fixed
-                * locations.
-                */
-               uint16_t dcbptr = ROM16(bios->data[0x36]);
-               uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+               goto no_table;
+       } else {
+               NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+               goto no_table;
+       }
  
-               if (tvdac_gpio[0] & 1) {
-                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+       entry = gpio + headerlen;
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               e = new_gpio_entry(bios);
+               if (!e)
+                       break;
  
-                       gpio->tag = DCB_GPIO_TVDAC0;
-                       gpio->line = tvdac_gpio[1] >> 4;
-                       gpio->invert = tvdac_gpio[0] & 2;
-               }
-       } else {
-               /*
-                * No systematic way to store GPIO info on pre-v2.2
-                * DCBs, try to match the PCI device IDs.
-                */
+               if (gpio[0] < 0x40) {
+                       e->entry = ROM16(entry[0]);
+                       e->tag = (e->entry & 0x07e0) >> 5;
+                       if (e->tag == 0x3f) {
+                               bios->dcb.gpio.entries--;
+                               continue;
+                       }
  
-               /* Apple iMac G4 NV18 */
-               if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+                       e->line = (e->entry & 0x001f);
+                       e->invert = ((e->entry & 0xf800) >> 11) != 4;
+               } else {
+                       e->entry = ROM32(entry[0]);
+                       e->tag = (e->entry & 0x0000ff00) >> 8;
+                       if (e->tag == 0xff) {
+                               bios->dcb.gpio.entries--;
+                               continue;
+                       }
  
-                       gpio->tag = DCB_GPIO_TVDAC0;
-                       gpio->line = 4;
+                       e->line = (e->entry & 0x0000001f) >> 0;
+                       e->state_default = (e->entry & 0x01000000) >> 24;
+                       e->state[0] = (e->entry & 0x18000000) >> 27;
+                       e->state[1] = (e->entry & 0x60000000) >> 29;
                }
        }
  
-       if (!gpio_table_ptr)
-               return;
-       if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
-               NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
-               entries = DCB_MAX_NUM_GPIO_ENTRIES;
+ no_table:
+       /* Apple iMac G4 NV18 */
+       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+               e = new_gpio_entry(bios);
+               if (e) {
+                       e->tag = DCB_GPIO_TVDAC0;
+                       e->line = 4;
+               }
        }
-       for (i = 0; i < entries; i++)
-               parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
  }
  
  struct dcb_connector_table_entry *
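
Note: the rewritten parse_dcb_gpio_table() decodes 16-bit entries for DCB < 4.0 and 32-bit entries for DCB >= 4.0. A small sketch of the 4.0 decode, using the masks from the hunk above (struct and function names here are illustrative):

#include <stdint.h>

struct gpio_sketch {
	uint8_t tag, line, state_default, state[2];
};

static int decode_dcb40_gpio(uint32_t entry, struct gpio_sketch *g)
{
	g->tag = (entry & 0x0000ff00) >> 8;
	if (g->tag == 0xff)		/* a 0xff tag marks an unused slot */
		return -1;

	g->line          = (entry & 0x0000001f) >> 0;
	g->state_default = (entry & 0x01000000) >> 24;
	g->state[0]      = (entry & 0x18000000) >> 27;
	g->state[1]      = (entry & 0x60000000) >> 29;
	return 0;
}
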
@@@ -6680,6 -6748,8 +6748,8 @@@ static int nouveau_parse_vbios_struct(s
                                        bit_signature, sizeof(bit_signature));
        if (offset) {
                NV_TRACE(dev, "BIT BIOS found\n");
+               bios->type = NVBIOS_BIT;
+               bios->offset = offset;
                return parse_bit_structure(bios, offset + 6);
        }
  
                                        bmp_signature, sizeof(bmp_signature));
        if (offset) {
                NV_TRACE(dev, "BMP BIOS found\n");
+               bios->type = NVBIOS_BMP;
+               bios->offset = offset;
                return parse_bmp_structure(dev, bios, offset);
        }
  
@@@ -6806,6 -6878,8 +6878,8 @@@ nouveau_bios_init(struct drm_device *de
                        "running VBIOS init tables.\n");
                bios->execute = true;
        }
+       if (nouveau_force_post)
+               bios->execute = true;
  
        ret = nouveau_run_vbios_init(dev);
        if (ret)
  #include "nouveau_hw.h"
  #include "nouveau_fb.h"
  #include "nouveau_fbcon.h"
+ #include "nouveau_pm.h"
  #include "nv50_display.h"
  
  #include "drm_pciids.h"
  
- MODULE_PARM_DESC(noagp, "Disable AGP");
- int nouveau_noagp;
- module_param_named(noagp, nouveau_noagp, int, 0400);
+ MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+ int nouveau_agpmode = -1;
+ module_param_named(agpmode, nouveau_agpmode, int, 0400);
  
  MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
  static int nouveau_modeset = -1; /* kms */
@@@ -79,6 -80,10 +80,10 @@@ MODULE_PARM_DESC(nofbaccel, "Disable fb
  int nouveau_nofbaccel = 0;
  module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
  
+ MODULE_PARM_DESC(force_post, "Force POST");
+ int nouveau_force_post = 0;
+ module_param_named(force_post, nouveau_force_post, int, 0400);
  MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
  int nouveau_override_conntype = 0;
  module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@@ -102,6 -107,14 +107,14 @@@ MODULE_PARM_DESC(reg_debug, "Register a
  int nouveau_reg_debug;
  module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
  
+ MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
+ char *nouveau_perflvl;
+ module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
+ int nouveau_perflvl_wr;
+ module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
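
Note: the new perflvl, perflvl_wr and force_post knobs all follow the standard module-parameter idiom: a description, a backing variable, and a module_param_named() that exposes it under /sys/module/<module>/parameters/. A minimal sketch with a made-up parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

MODULE_PARM_DESC(example, "Example knob (0 = off)");
static int example_knob;
module_param_named(example, example_knob, int, 0400);	/* 0400: owner-readable, not runtime-writable */
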
  int nouveau_fbpercrtc;
  #if 0
  module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@@ -271,6 -284,8 +284,8 @@@ nouveau_pci_resume(struct pci_dev *pdev
        if (ret)
                return ret;
  
+       nouveau_pm_resume(dev);
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ret = nouveau_mem_init_agp(dev);
                if (ret) {
@@@ -379,8 -394,6 +394,6 @@@ static struct drm_driver driver = 
        .irq_uninstall = nouveau_irq_uninstall,
        .irq_handler = nouveau_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = nouveau_ioctls,
        .fops = {
                .owner = THIS_MODULE,
  #if defined(CONFIG_COMPAT)
                .compat_ioctl = nouveau_compat_ioctl,
  #endif
 +              .llseek = noop_llseek,
        },
        .pci_driver = {
                .name = DRIVER_NAME,
@@@ -24,6 -24,7 +24,6 @@@
  #define __NOUVEAU_I2C_H__
  
  #include <linux/i2c.h>
 -#include <linux/i2c-id.h>
  #include <linux/i2c-algo-bit.h>
  #include "drm_dp_helper.h"
  
@@@ -43,7 -44,10 +43,10 @@@ void nouveau_i2c_fini(struct drm_devic
  struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
  bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
  int nouveau_i2c_identify(struct drm_device *dev, const char *what,
-                        struct i2c_board_info *info, int index);
+                        struct i2c_board_info *info,
+                        bool (*match)(struct nouveau_i2c_chan *,
+                                      struct i2c_board_info *),
+                        int index);
  
  extern const struct i2c_algorithm nouveau_dp_i2c_algo;
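
Note: nouveau_i2c_identify() grows a match callback so a caller can reject a device that ACKs its i2c address but fails a closer identity check. A sketch of a matcher with the new signature (the body is illustrative; a real one would read chip-specific ID registers over the channel):

static bool example_match(struct nouveau_i2c_chan *i2c,
			  struct i2c_board_info *info)
{
	/* probe device-specific ID registers here; returning true accepts it */
	return true;
}

/* then: nouveau_i2c_identify(dev, "tv encoder", info, example_match, index); */
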
  
@@@ -56,8 -56,6 +56,6 @@@ static struct drm_driver driver = 
        .irq_uninstall = r128_driver_irq_uninstall,
        .irq_handler = r128_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = r128_ioctls,
        .dma_ioctl = r128_cce_buffers,
        .fops = {
@@@ -71,7 -69,6 +69,7 @@@
  #ifdef CONFIG_COMPAT
                .compat_ioctl = r128_compat_ioctl,
  #endif
 +              .llseek = noop_llseek,
        },
        .pci_driver = {
                .name = DRIVER_NAME,
   * - 2.4.0 - add crtc id query
   * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
   * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+  * - 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
   */
  #define KMS_DRIVER_MAJOR      2
- #define KMS_DRIVER_MINOR      6
+ #define KMS_DRIVER_MINOR      7
  #define KMS_DRIVER_PATCHLEVEL 0
  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
  int radeon_driver_unload_kms(struct drm_device *dev);
@@@ -93,7 -94,6 +94,6 @@@ int radeon_benchmarking = 0
  int radeon_testing = 0;
  int radeon_connector_table = 0;
  int radeon_tv = 1;
- int radeon_new_pll = -1;
  int radeon_audio = 1;
  int radeon_disp_priority = 0;
  int radeon_hw_i2c = 0;
@@@ -131,9 -131,6 +131,6 @@@ module_param_named(connector_table, rad
  MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
  module_param_named(tv, radeon_tv, int, 0444);
  
- MODULE_PARM_DESC(new_pll, "Select new PLL code");
- module_param_named(new_pll, radeon_new_pll, int, 0444);
  MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
  module_param_named(audio, radeon_audio, int, 0444);
  
@@@ -203,8 -200,6 +200,6 @@@ static struct drm_driver driver_old = 
        .irq_uninstall = radeon_driver_irq_uninstall,
        .irq_handler = radeon_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = radeon_ioctls,
        .dma_ioctl = radeon_cp_buffers,
        .fops = {
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_compat_ioctl,
  #endif
 +               .llseek = noop_llseek,
        },
  
        .pci_driver = {
@@@ -291,8 -285,6 +286,6 @@@ static struct drm_driver kms_driver = 
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
        .irq_handler = radeon_driver_irq_handler_kms,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = radeon_ioctls_kms,
        .gem_init_object = radeon_gem_object_init,
        .gem_free_object = radeon_gem_object_free,
@@@ -35,7 -35,9 +35,8 @@@
  #include <drm_edid.h>
  #include <drm_dp_helper.h>
  #include <drm_fixed.h>
+ #include <drm_crtc_helper.h>
  #include <linux/i2c.h>
 -#include <linux/i2c-id.h>
  #include <linux/i2c-algo-bit.h>
  
  struct radeon_bo;
@@@ -149,12 -151,6 +150,6 @@@ struct radeon_tmds_pll 
  #define RADEON_PLL_USE_POST_DIV         (1 << 12)
  #define RADEON_PLL_IS_LCD               (1 << 13)
  
- /* pll algo */
- enum radeon_pll_algo {
-       PLL_ALGO_LEGACY,
-       PLL_ALGO_NEW
- };
  struct radeon_pll {
        /* reference frequency */
        uint32_t reference_freq;
  
        /* pll id */
        uint32_t id;
-       /* pll algo */
-       enum radeon_pll_algo algo;
  };
  
  struct radeon_i2c_chan {
@@@ -240,6 -234,8 +233,8 @@@ struct radeon_mode_info 
        struct drm_property *tmds_pll_property;
        /* underscan */
        struct drm_property *underscan_property;
+       struct drm_property *underscan_hborder_property;
+       struct drm_property *underscan_vborder_property;
        /* hardcoded DFP edid from BIOS */
        struct edid *bios_hardcoded_edid;
  
@@@ -335,22 -331,24 +330,24 @@@ struct radeon_encoder_ext_tmds 
  struct radeon_atom_ss {
        uint16_t percentage;
        uint8_t type;
-       uint8_t step;
+       uint16_t step;
        uint8_t delay;
        uint8_t range;
        uint8_t refdiv;
+       /* asic_ss */
+       uint16_t rate;
+       uint16_t amount;
  };
  
  struct radeon_encoder_atom_dig {
        bool linkb;
        /* atom dig */
        bool coherent_mode;
-       int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
-       /* atom lvds */
-       uint32_t lvds_misc;
+       int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+       /* atom lvds/edp */
+       uint32_t lcd_misc;
        uint16_t panel_pwr_delay;
-       enum radeon_pll_algo pll_algo;
-       struct radeon_atom_ss *ss;
+       uint32_t lcd_ss_id;
        /* panel mode */
        struct drm_display_mode native_mode;
  };
@@@ -369,6 -367,8 +366,8 @@@ struct radeon_encoder 
        uint32_t pixel_clock;
        enum radeon_rmx_type rmx_type;
        enum radeon_underscan_type underscan_type;
+       uint32_t underscan_hborder;
+       uint32_t underscan_vborder;
        struct drm_display_mode native_mode;
        void *enc_priv;
        int audio_polling_active;
@@@ -435,6 -435,11 +434,11 @@@ struct radeon_framebuffer 
        struct drm_gem_object *obj;
  };
  
+ /* radeon_get_crtc_scanoutpos() return flags */
+ #define RADEON_SCANOUTPOS_VALID        (1 << 0)
+ #define RADEON_SCANOUTPOS_INVBL        (1 << 1)
+ #define RADEON_SCANOUTPOS_ACCURATE     (1 << 2)
  extern enum radeon_tv_std
  radeon_combios_get_tv_info(struct radeon_device *rdev);
  extern enum radeon_tv_std
@@@ -490,6 -495,13 +494,13 @@@ extern int radeon_ddc_get_modes(struct 
  
  extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
  
+ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+                                            struct radeon_atom_ss *ss,
+                                            int id);
+ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+                                            struct radeon_atom_ss *ss,
+                                            int id, u32 clock);
  extern void radeon_compute_pll(struct radeon_pll *pll,
                               uint64_t freq,
                               uint32_t *dot_clock_p,
@@@ -513,6 -525,10 +524,10 @@@ extern void radeon_encoder_set_active_d
  extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
  extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                   struct drm_framebuffer *old_fb);
+ extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y,
+                                        enum mode_set_atomic state);
  extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode,
@@@ -522,7 -538,13 +537,13 @@@ extern void atombios_crtc_dpms(struct d
  
  extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                 struct drm_framebuffer *old_fb);
+ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                      struct drm_framebuffer *fb,
+                                      int x, int y,
+                                      enum mode_set_atomic state);
+ extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+                                  struct drm_framebuffer *fb,
+                                  int x, int y, int atomic);
  extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                                  struct drm_file *file_priv,
                                  uint32_t handle,
  extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
  
+ extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
  extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
  extern struct edid *
  radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
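
Note: radeon_get_crtc_scanoutpos() returns the RADEON_SCANOUTPOS_* bits defined above alongside the vpos/hpos out-parameters. A sketch of a caller, reading the flag names as VALID = results usable and INVBL = scanout currently inside vertical blank (the helper itself is hypothetical):

static bool crtc_in_vblank(struct radeon_device *rdev, int crtc)
{
	int vpos, hpos;
	int stat = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);

	return (stat & RADEON_SCANOUTPOS_VALID) &&
	       (stat & RADEON_SCANOUTPOS_INVBL);
}
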
@@@ -152,6 -152,7 +152,7 @@@ static int radeon_init_mem_type(struct 
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
+               man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
@@@ -246,8 -248,8 +248,8 @@@ static int radeon_move_blit(struct ttm_
        if (unlikely(r)) {
                return r;
        }
-       old_start = old_mem->mm_node->start << PAGE_SHIFT;
-       new_start = new_mem->mm_node->start << PAGE_SHIFT;
+       old_start = old_mem->start << PAGE_SHIFT;
+       new_start = new_mem->start << PAGE_SHIFT;
  
        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
@@@ -326,14 -328,7 +328,7 @@@ static int radeon_move_vram_ram(struct 
        }
        r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
  out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
  }
  
@@@ -372,14 -367,7 +367,7 @@@ static int radeon_move_ram_vram(struct 
                goto out_cleanup;
        }
  out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
  }
  
@@@ -449,14 -437,14 +437,14 @@@ static int radeon_ttm_io_mem_reserve(st
  #if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
-                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
  #endif
                break;
        case TTM_PL_VRAM:
-               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
@@@ -631,7 -619,7 +619,7 @@@ int radeon_mmap(struct file *filp, stru
                return drm_mmap(filp, vma);
        }
  
 -      file_priv = (struct drm_file *)filp->private_data;
 +      file_priv = filp->private_data;
        rdev = file_priv->minor->dev->dev_private;
        if (rdev == NULL) {
                return -EINVAL;
@@@ -699,7 -687,7 +687,7 @@@ static int radeon_ttm_backend_bind(stru
        int r;
  
        gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+       gtt->offset = bo_mem->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
        }
@@@ -798,9 -786,9 +786,9 @@@ static int radeon_ttm_debugfs_init(stru
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
  
        }
        /* Add ttm page pool to debugfs */
@@@ -42,8 -42,6 +42,6 @@@ static struct drm_driver driver = 
        .lastclose = savage_driver_lastclose,
        .unload = savage_driver_unload,
        .reclaim_buffers = savage_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = savage_ioctls,
        .dma_ioctl = savage_bci_buffers,
        .fops = {
@@@ -54,7 -52,6 +52,7 @@@
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .llseek = noop_llseek,
        },
  
        .pci_driver = {
@@@ -67,13 -67,10 +67,10 @@@ static struct drm_driver driver = 
        .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
-       .context_dtor = NULL,
        .dma_quiescent = sis_idle,
        .reclaim_buffers = NULL,
        .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
        .lastclose = sis_lastclose,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = sis_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@@ -83,7 -80,6 +80,7 @@@
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .llseek = noop_llseek,
        },
        .pci_driver = {
                 .name = DRIVER_NAME,
@@@ -42,8 -42,6 +42,6 @@@ static struct pci_device_id pciidlist[
  static struct drm_driver driver = {
        .driver_features = DRIVER_USE_MTRR,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
@@@ -52,7 -50,6 +50,7 @@@
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .llseek = noop_llseek,
        },
        .pci_driver = {
                 .name = DRIVER_NAME,
  
  void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
  {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       if (old_mem->mm_node) {
-               spin_lock(&bo->glob->lru_lock);
-               drm_mm_put_block(old_mem->mm_node);
-               spin_unlock(&bo->glob->lru_lock);
-       }
-       old_mem->mm_node = NULL;
+       ttm_bo_mem_put(bo, &bo->mem);
  }
  
  int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@@ -170,7 -163,7 +163,7 @@@ static int ttm_copy_io_ttm_page(struct 
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
  
  #ifdef CONFIG_X86
 -      dst = kmap_atomic_prot(d, KM_USER0, prot);
 +      dst = kmap_atomic_prot(d, prot);
  #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        memcpy_fromio(dst, src, PAGE_SIZE);
  
  #ifdef CONFIG_X86
 -      kunmap_atomic(dst, KM_USER0);
 +      kunmap_atomic(dst);
  #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
@@@ -206,7 -199,7 +199,7 @@@ static int ttm_copy_ttm_io_page(struct 
  
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
  #ifdef CONFIG_X86
 -      src = kmap_atomic_prot(s, KM_USER0, prot);
 +      src = kmap_atomic_prot(s, prot);
  #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        memcpy_toio(dst, src, PAGE_SIZE);
  
  #ifdef CONFIG_X86
 -      kunmap_atomic(src, KM_USER0);
 +      kunmap_atomic(src);
  #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
@@@ -263,8 -256,7 +256,7 @@@ int ttm_bo_move_memcpy(struct ttm_buffe
        dir = 1;
  
        if ((old_mem->mem_type == new_mem->mem_type) &&
-           (new_mem->mm_node->start <
-            old_mem->mm_node->start + old_mem->mm_node->size)) {
+           (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }
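
Note: the radeon and ttm hunks in this merge collapse the open-coded node teardown into ttm_bo_mem_put(), which hides the manager-specific locking. Side by side, condensed from the radeon_move_vram_ram() cleanup path above:

/* before: the caller reaches into the mm node and the lru_lock itself */
if (tmp_mem.mm_node) {
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	drm_mm_put_block(tmp_mem.mm_node);
	spin_unlock(&glob->lru_lock);
}

/* after: one call; the per-type manager (the man->func hooks set in the
 * radeon_ttm hunk) releases whatever node is attached to tmp_mem */
ttm_bo_mem_put(bo, &tmp_mem);

That manager indirection is also what allows vmwgfx, later in this merge, to back its GMR placement with its own manager rather than the drm_mm-based default.
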
@@@ -51,8 -51,6 +51,6 @@@ static struct drm_driver driver = 
        .reclaim_buffers_locked = NULL,
        .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
        .lastclose = via_lastclose,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = via_ioctls,
        .fops = {
                .owner = THIS_MODULE,
@@@ -62,7 -60,6 +60,7 @@@
                .mmap = drm_mmap,
                .poll = drm_poll,
                .fasync = drm_fasync,
 +              .llseek = noop_llseek,
                },
        .pci_driver = {
                .name = DRIVER_NAME,
@@@ -260,13 -260,11 +260,11 @@@ static int vmw_driver_load(struct drm_d
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
-       ida_init(&dev_priv->gmr_ida);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        atomic_set(&dev_priv->fence_queue_waiters, 0);
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
-       INIT_LIST_HEAD(&dev_priv->gmr_lru);
  
        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
                goto out_err2;
        }
  
+       dev_priv->has_gmr = true;
+       if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+                          dev_priv->max_gmr_ids) != 0) {
+               DRM_INFO("No GMR memory available. "
+                        "Graphics memory resources are very limited.\n");
+               dev_priv->has_gmr = false;
+       }
        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);
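
Note: GMR is treated as an optional memory domain here: if ttm_bo_init_mm() fails, the driver records has_gmr = false and runs with VRAM only, and every teardown path (the error-path and unload hunks below) checks the flag before calling ttm_bo_clean_mm(). The shape of the pairing, condensed from the surrounding hunks:

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	/* ... on unload, or on any error path past this point ... */
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
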
  
@@@ -440,13 -446,14 +446,14 @@@ out_err4
  out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
+       if (dev_priv->has_gmr)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
  out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
  out_err1:
        vmw_ttm_global_release(dev_priv);
  out_err0:
-       ida_destroy(&dev_priv->gmr_ida);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
@@@ -478,10 -485,11 +485,11 @@@ static int vmw_driver_unload(struct drm
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
+       if (dev_priv->has_gmr)
+               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
-       ida_destroy(&dev_priv->gmr_ida);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
@@@ -597,6 -605,8 +605,8 @@@ static void vmw_lastclose(struct drm_de
  static void vmw_master_init(struct vmw_master *vmaster)
  {
        ttm_lock_init(&vmaster->lock);
+       INIT_LIST_HEAD(&vmaster->fb_surf);
+       mutex_init(&vmaster->fb_surf_mutex);
  }
  
  static int vmw_master_create(struct drm_device *dev,
        if (unlikely(vmaster == NULL))
                return -ENOMEM;
  
-       ttm_lock_init(&vmaster->lock);
+       vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;
  
@@@ -699,6 -709,7 +709,7 @@@ static void vmw_master_drop(struct drm_
  
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+       vmw_kms_idle_workqueues(vmaster);
  
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@@ -751,15 -762,16 +762,16 @@@ static int vmwgfx_pm_notifier(struct no
                 * Buffer contents is moved to swappable memory.
                 */
                ttm_bo_swapout_all(&dev_priv->bdev);
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
+       case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);
                break;
        case PM_RESTORE_PREPARE:
                break;
-       case PM_POST_RESTORE:
-               break;
        default:
                break;
        }
   * These might not be needed with the virtual SVGA device.
   */
  
- int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+ static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
  {
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       if (dev_priv->num_3d_resources != 0) {
+               DRM_INFO("Can't suspend or hibernate "
+                        "while 3D resources are active.\n");
+               return -EBUSY;
+       }
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
  }
  
- int vmw_pci_resume(struct pci_dev *pdev)
+ static int vmw_pci_resume(struct pci_dev *pdev)
  {
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
  }
  
+ static int vmw_pm_suspend(struct device *kdev)
+ {
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct pm_message dummy;
+       dummy.event = 0;
+       return vmw_pci_suspend(pdev, dummy);
+ }
+ static int vmw_pm_resume(struct device *kdev)
+ {
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       return vmw_pci_resume(pdev);
+ }
+ static int vmw_pm_prepare(struct device *kdev)
+ {
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       /**
+        * Release 3d reference held by fbdev and potentially
+        * stop fifo.
+        */
+       dev_priv->suspended = true;
+       if (dev_priv->enable_fb)
+               vmw_3d_resource_dec(dev_priv);
+       if (dev_priv->num_3d_resources != 0) {
+               DRM_INFO("Can't suspend or hibernate "
+                        "while 3D resources are active.\n");
+               if (dev_priv->enable_fb)
+                       vmw_3d_resource_inc(dev_priv);
+               dev_priv->suspended = false;
+               return -EBUSY;
+       }
+       return 0;
+ }
+ static void vmw_pm_complete(struct device *kdev)
+ {
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       /**
+        * Reclaim 3d reference held by fbdev and potentially
+        * start fifo.
+        */
+       if (dev_priv->enable_fb)
+               vmw_3d_resource_inc(dev_priv);
+       dev_priv->suspended = false;
+ }
+ static const struct dev_pm_ops vmw_pm_ops = {
+       .prepare = vmw_pm_prepare,
+       .complete = vmw_pm_complete,
+       .suspend = vmw_pm_suspend,
+       .resume = vmw_pm_resume,
+ };
  static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
  #if defined(CONFIG_COMPAT)
                 .compat_ioctl = drm_compat_ioctl,
  #endif
-                },
 +               .llseek = noop_llseek,
+       },
        .pci_driver = {
-                      .name = VMWGFX_DRIVER_NAME,
-                      .id_table = vmw_pci_id_list,
-                      .probe = vmw_probe,
-                      .remove = vmw_remove,
-                      .suspend = vmw_pci_suspend,
-                      .resume = vmw_pci_resume
-                      },
+                .name = VMWGFX_DRIVER_NAME,
+                .id_table = vmw_pci_id_list,
+                .probe = vmw_probe,
+                .remove = vmw_remove,
+                .driver = {
+                        .pm = &vmw_pm_ops
+                }
+        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
@@@ -863,3 -950,7 +951,7 @@@ module_exit(vmwgfx_exit)
  MODULE_AUTHOR("VMware Inc. and others");
  MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
  MODULE_LICENSE("GPL and additional rights");
+ MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+              __stringify(VMWGFX_DRIVER_MINOR) "."
+              __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+              "0");
diff --combined drivers/video/Kconfig
@@@ -17,6 -17,8 +17,8 @@@ source "drivers/gpu/vga/Kconfig
  
  source "drivers/gpu/drm/Kconfig"
  
+ source "drivers/gpu/stub/Kconfig"
  config VGASTATE
         tristate
         default n
@@@ -49,7 -51,7 +51,7 @@@ menuconfig F
          You need an utility program called fbset to make full use of frame
          buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
          and the Framebuffer-HOWTO at
 -        <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.2.html> for more
 +        <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
          information.
  
          Say Y here and to the driver for your graphics board below if you
@@@ -955,7 -957,7 +957,7 @@@ config FB_EPSON135
          Build in support for the SED1355 Epson Research Embedded RAMDAC
          LCD/CRT Controller (since redesignated as the S1D13505) as a
          framebuffer.  Product specs at
 -        <http://www.erd.epson.com/vdc/html/products.htm>.
 +        <http://vdc.epson.com/>.
  
  config FB_S1D13XXX
        tristate "Epson S1D13XXX framebuffer support"
        help
          Support for S1D13XXX framebuffer device family (currently only
          working with S1D13806). Product specs at
 -        <http://www.erd.epson.com/vdc/html/legacy_13xxx.htm>
 +        <http://vdc.epson.com/>
  
  config FB_ATMEL
        tristate "AT91/AT32 LCD Controller support"
@@@ -1323,7 -1325,7 +1325,7 @@@ config FB_RADEO
          don't need to choose this to run the Radeon in plain VGA mode.
  
          There is a product page at
 -        http://apps.ati.com/ATIcompare/
 +        http://products.amd.com/en-us/GraphicCardResult.aspx
  
  config FB_RADEON_I2C
        bool "DDC/I2C for ATI Radeon support"
@@@ -1395,7 -1397,7 +1397,7 @@@ config FB_ATY_C
          Say Y here to support use of ATI's 64-bit Rage boards (or other
          boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
          framebuffer device.  The ATI product support page for these boards
 -        is at <http://support.ati.com/products/pc/mach64/>.
 +        is at <http://support.ati.com/products/pc/mach64/mach64.html>.
  
  config FB_ATY_GENERIC_LCD
        bool "Mach64 generic LCD support (EXPERIMENTAL)"
@@@ -1919,9 -1921,6 +1921,9 @@@ config FB_SH_MOBILE_HDM
        tristate "SuperH Mobile HDMI controller support"
        depends on FB_SH_MOBILE_LCDC
        select FB_MODE_HELPERS
 +      select SOUND
 +      select SND
 +      select SND_SOC
        ---help---
          Driver for the on-chip SH-Mobile HDMI controller.